Statistics
| Branch: | Tag: | Revision:

root / lib / cmdlib.py @ c2c2a903

History | View | Annotate | Download (185.6 kB)

1 2f31098c Iustin Pop
#
2 a8083063 Iustin Pop
#
3 a8083063 Iustin Pop
4 e7c6e02b Michael Hanselmann
# Copyright (C) 2006, 2007, 2008 Google Inc.
5 a8083063 Iustin Pop
#
6 a8083063 Iustin Pop
# This program is free software; you can redistribute it and/or modify
7 a8083063 Iustin Pop
# it under the terms of the GNU General Public License as published by
8 a8083063 Iustin Pop
# the Free Software Foundation; either version 2 of the License, or
9 a8083063 Iustin Pop
# (at your option) any later version.
10 a8083063 Iustin Pop
#
11 a8083063 Iustin Pop
# This program is distributed in the hope that it will be useful, but
12 a8083063 Iustin Pop
# WITHOUT ANY WARRANTY; without even the implied warranty of
13 a8083063 Iustin Pop
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
14 a8083063 Iustin Pop
# General Public License for more details.
15 a8083063 Iustin Pop
#
16 a8083063 Iustin Pop
# You should have received a copy of the GNU General Public License
17 a8083063 Iustin Pop
# along with this program; if not, write to the Free Software
18 a8083063 Iustin Pop
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
19 a8083063 Iustin Pop
# 02110-1301, USA.
20 a8083063 Iustin Pop
21 a8083063 Iustin Pop
22 880478f8 Iustin Pop
"""Module implementing the master-side code."""
23 a8083063 Iustin Pop
24 a8083063 Iustin Pop
# pylint: disable-msg=W0613,W0201
25 a8083063 Iustin Pop
26 a8083063 Iustin Pop
import os
27 a8083063 Iustin Pop
import os.path
28 a8083063 Iustin Pop
import sha
29 a8083063 Iustin Pop
import time
30 a8083063 Iustin Pop
import tempfile
31 a8083063 Iustin Pop
import re
32 a8083063 Iustin Pop
import platform
33 a8083063 Iustin Pop
34 a8083063 Iustin Pop
from ganeti import rpc
35 a8083063 Iustin Pop
from ganeti import ssh
36 a8083063 Iustin Pop
from ganeti import logger
37 a8083063 Iustin Pop
from ganeti import utils
38 a8083063 Iustin Pop
from ganeti import errors
39 a8083063 Iustin Pop
from ganeti import hypervisor
40 6048c986 Guido Trotter
from ganeti import locking
41 a8083063 Iustin Pop
from ganeti import constants
42 a8083063 Iustin Pop
from ganeti import objects
43 a8083063 Iustin Pop
from ganeti import opcodes
44 8d14b30d Iustin Pop
from ganeti import serializer
45 d61df03e Iustin Pop
46 d61df03e Iustin Pop
47 a8083063 Iustin Pop
class LogicalUnit(object):
  """Base class for all Logical Units.

  A concrete subclass must:
    - implement ExpandNames
    - implement CheckPrereq
    - implement Exec
    - implement BuildHooksEnv
    - redefine HPATH and HTYPE
    - optionally redefine its run requirements:
        REQ_MASTER: the LU needs to run on the master node
        REQ_WSSTORE: the LU needs a writable SimpleStore
        REQ_BGL: the LU needs to hold the Big Ganeti Lock exclusively

  Note that all commands require root permissions.

  """
  HPATH = None
  HTYPE = None
  _OP_REQP = []
  REQ_MASTER = True
  REQ_WSSTORE = False
  REQ_BGL = True

  def __init__(self, processor, op, context, sstore):
    """Constructor for LogicalUnit.

    This needs to be overridden in derived classes in order to check op
    validity.

    """
    self.proc = processor
    self.op = op
    self.cfg = context.cfg
    self.sstore = sstore
    self.context = context
    # Dicts used to declare locking needs to mcpu
    self.needed_locks = None
    self.acquired_locks = {}
    self.share_locks = dict(((i, 0) for i in locking.LEVELS))
    self.add_locks = {}
    self.remove_locks = {}
    # Used to force good behavior when calling helper functions
    self.recalculate_locks = {}
    # lazily-initialized SshRunner, exposed via the 'ssh' property below
    self.__ssh = None

    # Fail early if any opcode parameter declared as required is absent
    for required in self._OP_REQP:
      if getattr(op, required, None) is None:
        raise errors.OpPrereqError("Required parameter '%s' missing" %
                                   required)

    if not self.cfg.IsCluster():
      raise errors.OpPrereqError("Cluster not initialized yet,"
                                 " use 'gnt-cluster init' first.")
    if self.REQ_MASTER:
      master = sstore.GetMasterNode()
      if master != utils.HostInfo().name:
        raise errors.OpPrereqError("Commands must be run on the master"
                                   " node %s" % master)

  def __GetSSH(self):
    """Return the SshRunner object, creating it on first use.

    """
    if self.__ssh is None:
      self.__ssh = ssh.SshRunner(self.sstore)
    return self.__ssh

  ssh = property(fget=__GetSSH)

  def ExpandNames(self):
    """Expand names for this LU.

    This method is called before starting to execute the opcode, and it should
    update all the parameters of the opcode to their canonical form (e.g. a
    short node name must be fully expanded after this method has successfully
    completed). This way locking, hooks, logging, ecc. can work correctly.

    LUs which implement this method must also populate the self.needed_locks
    member, as a dict with lock levels as keys, and a list of needed lock names
    as values. Rules:
      - Use an empty dict if you don't need any lock
      - If you don't need any lock at a particular level omit that level
      - Don't put anything for the BGL level
      - If you want all locks at a level use locking.ALL_SET as a value

    If you need to share locks (rather than acquire them exclusively) at one
    level you can modify self.share_locks, setting a true value (usually 1) for
    that level. By default locks are not shared.

    Examples:
    # Acquire all nodes and one instance
    self.needed_locks = {
      locking.LEVEL_NODE: locking.ALL_SET,
      locking.LEVEL_INSTANCE: ['instance1.example.tld'],
    }
    # Acquire just two nodes
    self.needed_locks = {
      locking.LEVEL_NODE: ['node1.example.tld', 'node2.example.tld'],
    }
    # Acquire no locks
    self.needed_locks = {} # No, you can't leave it to the default value None

    """
    # Implementing this method is mandatory only for concurrent LUs; old
    # (BGL-holding) LUs fall through to the default below, so they don't
    # all need to be converted at the same time.
    if not self.REQ_BGL:
      raise NotImplementedError
    self.needed_locks = {}  # Exclusive LUs don't need locks.

  def DeclareLocks(self, level):
    """Declare LU locking needs for a level

    While most LUs can just declare their locking needs at ExpandNames time,
    sometimes there's the need to calculate some locks after having acquired
    the ones before. This function is called just before acquiring locks at a
    particular level, but after acquiring the ones at lower levels, and permits
    such calculations. It can be used to modify self.needed_locks, and by
    default it does nothing.

    This function is only called if you have something already set in
    self.needed_locks for the level.

    @param level: Locking level which is going to be locked
    @type level: member of ganeti.locking.LEVELS

    """

  def CheckPrereq(self):
    """Check prerequisites for this LU.

    This method should check that the prerequisites for the execution
    of this LU are fulfilled. It can do internode communication, but
    it should be idempotent - no cluster or system changes are
    allowed.

    The method should raise errors.OpPrereqError in case something is
    not fulfilled. Its return value is ignored.

    This method should also update all the parameters of the opcode to
    their canonical form if it hasn't been done by ExpandNames before.

    """
    raise NotImplementedError

  def Exec(self, feedback_fn):
    """Execute the LU.

    This method should implement the actual work. It should raise
    errors.OpExecError for failures that are somewhat dealt with in
    code, or expected.

    """
    raise NotImplementedError

  def BuildHooksEnv(self):
    """Build hooks environment for this LU.

    This method should return a three-node tuple consisting of: a dict
    containing the environment that will be used for running the
    specific hook for this LU, a list of node names on which the hook
    should run before the execution, and a list of node names on which
    the hook should run after the execution.

    The keys of the dict must not have 'GANETI_' prefixed as this will
    be handled in the hooks runner. Also note additional keys will be
    added by the hooks runner. If the LU doesn't define any
    environment, an empty dict (and not None) should be returned.

    No nodes should be returned as an empty list (and not None).

    Note that if the HPATH for a LU class is None, this function will
    not be called.

    """
    raise NotImplementedError

  def HooksCallBack(self, phase, hook_results, feedback_fn, lu_result):
    """Notify the LU about the results of its hooks.

    This method is called every time a hooks phase is executed, and notifies
    the Logical Unit about the hooks' result. The LU can then use it to alter
    its result based on the hooks.  By default the method does nothing and the
    previous result is passed back unchanged but any LU can define it if it
    wants to use the local cluster hook-scripts somehow.

    Args:
      phase: the hooks phase that has just been run
      hook_results: the results of the multi-node hooks rpc call
      feedback_fn: function to send feedback back to the caller
      lu_result: the previous result this LU had, or None in the PRE phase.

    """
    return lu_result

  def _ExpandAndLockInstance(self):
    """Helper function to expand and lock an instance.

    Many LUs that work on an instance take its name in self.op.instance_name
    and need to expand it and then declare the expanded name for locking. This
    function does it, and then updates self.op.instance_name to the expanded
    name. It also initializes needed_locks as a dict, if this hasn't been done
    before.

    """
    if self.needed_locks is None:
      self.needed_locks = {}
    else:
      assert locking.LEVEL_INSTANCE not in self.needed_locks, \
        "_ExpandAndLockInstance called with instance-level locks set"
    full_name = self.cfg.ExpandInstanceName(self.op.instance_name)
    if full_name is None:
      raise errors.OpPrereqError("Instance '%s' not known" %
                                  self.op.instance_name)
    self.needed_locks[locking.LEVEL_INSTANCE] = full_name
    self.op.instance_name = full_name

  def _LockInstancesNodes(self, primary_only=False):
    """Helper function to declare instances' nodes for locking.

    This function should be called after locking one or more instances to lock
    their nodes. Its effect is populating self.needed_locks[locking.LEVEL_NODE]
    with all primary or secondary nodes for instances already locked and
    present in self.needed_locks[locking.LEVEL_INSTANCE].

    It should be called from DeclareLocks, and for safety only works if
    self.recalculate_locks[locking.LEVEL_NODE] is set.

    In the future it may grow parameters to just lock some instance's nodes, or
    to just lock primaries or secondary nodes, if needed.

    If should be called in DeclareLocks in a way similar to:

    if level == locking.LEVEL_NODE:
      self._LockInstancesNodes()

    @type primary_only: boolean
    @param primary_only: only lock primary nodes of locked instances

    """
    assert locking.LEVEL_NODE in self.recalculate_locks, \
      "_LockInstancesNodes helper function called with no nodes to recalculate"

    # TODO: check if we're really been called with the instance locks held

    # For now we'll replace self.needed_locks[locking.LEVEL_NODE], but in the
    # future we might want to have different behaviors depending on the value
    # of self.recalculate_locks[locking.LEVEL_NODE]
    wanted = []
    for instance_name in self.acquired_locks[locking.LEVEL_INSTANCE]:
      instance = self.context.cfg.GetInstanceInfo(instance_name)
      wanted.append(instance.primary_node)
      if not primary_only:
        wanted.extend(instance.secondary_nodes)

    mode = self.recalculate_locks[locking.LEVEL_NODE]
    if mode == constants.LOCKS_REPLACE:
      self.needed_locks[locking.LEVEL_NODE] = wanted
    elif mode == constants.LOCKS_APPEND:
      self.needed_locks[locking.LEVEL_NODE].extend(wanted)

    del self.recalculate_locks[locking.LEVEL_NODE]
311 c4a2fee1 Guido Trotter
312 a8083063 Iustin Pop
313 a8083063 Iustin Pop
class NoHooksLU(LogicalUnit):
  """A LogicalUnit that runs no hooks at all.

  Deriving from this class instead of LogicalUnit spares subclasses
  from redefining HPATH/HTYPE themselves, reducing duplicate code for
  LUs that don't want any hook processing.

  """
  HPATH = None
  HTYPE = None
322 a8083063 Iustin Pop
323 a8083063 Iustin Pop
324 dcb93971 Michael Hanselmann
def _GetWantedNodes(lu, nodes):
  """Returns list of checked and expanded node names.

  Args:
    nodes: non-empty list of node names (strings) to expand

  Raises errors.OpPrereqError if the argument is not a list or if a
  name cannot be expanded, and errors.ProgrammerError if the list is
  empty.

  """
  if not isinstance(nodes, list):
    raise errors.OpPrereqError("Invalid argument type 'nodes'")

  if not nodes:
    raise errors.ProgrammerError("_GetWantedNodes should only be called with a"
      " non-empty list of nodes whose name is to be expanded.")

  expanded = []
  for short_name in nodes:
    full_name = lu.cfg.ExpandNodeName(short_name)
    if full_name is None:
      raise errors.OpPrereqError("No such node name '%s'" % short_name)
    expanded.append(full_name)

  return utils.NiceSort(expanded)
346 3312b702 Iustin Pop
347 3312b702 Iustin Pop
348 3312b702 Iustin Pop
def _GetWantedInstances(lu, instances):
  """Returns list of checked and expanded instance names.

  Args:
    instances: List of instances (strings) or None for all

  Raises errors.OpPrereqError if the argument is not a list or if a
  name cannot be expanded.

  """
  if not isinstance(instances, list):
    raise errors.OpPrereqError("Invalid argument type 'instances'")

  # an empty (or None-like) list means "all instances"
  if not instances:
    return utils.NiceSort(lu.cfg.GetInstanceList())

  expanded = []
  for short_name in instances:
    full_name = lu.cfg.ExpandInstanceName(short_name)
    if full_name is None:
      raise errors.OpPrereqError("No such instance name '%s'" % short_name)
    expanded.append(full_name)

  return utils.NiceSort(expanded)
370 dcb93971 Michael Hanselmann
371 dcb93971 Michael Hanselmann
372 dcb93971 Michael Hanselmann
def _CheckOutputFields(static, dynamic, selected):
373 83120a01 Michael Hanselmann
  """Checks whether all selected fields are valid.
374 83120a01 Michael Hanselmann

375 83120a01 Michael Hanselmann
  Args:
376 83120a01 Michael Hanselmann
    static: Static fields
377 83120a01 Michael Hanselmann
    dynamic: Dynamic fields
378 83120a01 Michael Hanselmann

379 83120a01 Michael Hanselmann
  """
380 83120a01 Michael Hanselmann
  static_fields = frozenset(static)
381 83120a01 Michael Hanselmann
  dynamic_fields = frozenset(dynamic)
382 dcb93971 Michael Hanselmann
383 83120a01 Michael Hanselmann
  all_fields = static_fields | dynamic_fields
384 dcb93971 Michael Hanselmann
385 83120a01 Michael Hanselmann
  if not all_fields.issuperset(selected):
386 3ecf6786 Iustin Pop
    raise errors.OpPrereqError("Unknown output fields selected: %s"
387 3ecf6786 Iustin Pop
                               % ",".join(frozenset(selected).
388 3ecf6786 Iustin Pop
                                          difference(all_fields)))
389 dcb93971 Michael Hanselmann
390 dcb93971 Michael Hanselmann
391 ecb215b5 Michael Hanselmann
def _BuildInstanceHookEnv(name, primary_node, secondary_nodes, os_type, status,
392 396e1b78 Michael Hanselmann
                          memory, vcpus, nics):
393 ecb215b5 Michael Hanselmann
  """Builds instance related env variables for hooks from single variables.
394 ecb215b5 Michael Hanselmann

395 ecb215b5 Michael Hanselmann
  Args:
396 ecb215b5 Michael Hanselmann
    secondary_nodes: List of secondary nodes as strings
397 396e1b78 Michael Hanselmann
  """
398 396e1b78 Michael Hanselmann
  env = {
399 0e137c28 Iustin Pop
    "OP_TARGET": name,
400 396e1b78 Michael Hanselmann
    "INSTANCE_NAME": name,
401 396e1b78 Michael Hanselmann
    "INSTANCE_PRIMARY": primary_node,
402 396e1b78 Michael Hanselmann
    "INSTANCE_SECONDARIES": " ".join(secondary_nodes),
403 ecb215b5 Michael Hanselmann
    "INSTANCE_OS_TYPE": os_type,
404 396e1b78 Michael Hanselmann
    "INSTANCE_STATUS": status,
405 396e1b78 Michael Hanselmann
    "INSTANCE_MEMORY": memory,
406 396e1b78 Michael Hanselmann
    "INSTANCE_VCPUS": vcpus,
407 396e1b78 Michael Hanselmann
  }
408 396e1b78 Michael Hanselmann
409 396e1b78 Michael Hanselmann
  if nics:
410 396e1b78 Michael Hanselmann
    nic_count = len(nics)
411 53e4e875 Guido Trotter
    for idx, (ip, bridge, mac) in enumerate(nics):
412 396e1b78 Michael Hanselmann
      if ip is None:
413 396e1b78 Michael Hanselmann
        ip = ""
414 396e1b78 Michael Hanselmann
      env["INSTANCE_NIC%d_IP" % idx] = ip
415 396e1b78 Michael Hanselmann
      env["INSTANCE_NIC%d_BRIDGE" % idx] = bridge
416 53e4e875 Guido Trotter
      env["INSTANCE_NIC%d_HWADDR" % idx] = mac
417 396e1b78 Michael Hanselmann
  else:
418 396e1b78 Michael Hanselmann
    nic_count = 0
419 396e1b78 Michael Hanselmann
420 396e1b78 Michael Hanselmann
  env["INSTANCE_NIC_COUNT"] = nic_count
421 396e1b78 Michael Hanselmann
422 396e1b78 Michael Hanselmann
  return env
423 396e1b78 Michael Hanselmann
424 396e1b78 Michael Hanselmann
425 396e1b78 Michael Hanselmann
def _BuildInstanceHookEnvByObject(instance, override=None):
  """Builds instance related env variables for hooks from an object.

  Args:
    instance: objects.Instance object of instance
    override: dict of values to override

  Returns the hook environment dict built by _BuildInstanceHookEnv.

  """
  args = {
    'name': instance.name,
    'primary_node': instance.primary_node,
    'secondary_nodes': instance.secondary_nodes,
    'os_type': instance.os,
    # Bug fix: this used to pass instance.os here, so INSTANCE_STATUS
    # duplicated INSTANCE_OS_TYPE instead of reporting the run state.
    # NOTE(review): assumes objects.Instance has a 'status' attribute
    # holding the run state - confirm against the objects module.
    'status': instance.status,
    'memory': instance.memory,
    'vcpus': instance.vcpus,
    'nics': [(nic.ip, nic.bridge, nic.mac) for nic in instance.nics],
  }
  if override:
    args.update(override)
  return _BuildInstanceHookEnv(**args)
445 396e1b78 Michael Hanselmann
446 396e1b78 Michael Hanselmann
447 bf6929a2 Alexander Schreiber
def _CheckInstanceBridgesExist(instance):
  """Check that the brigdes needed by an instance exist.

  Queries the instance's primary node via rpc and raises
  errors.OpPrereqError if any bridge used by its NICs is missing there.

  """
  # check bridges existance
  bridges = [nic.bridge for nic in instance.nics]
  if not rpc.call_bridges_exist(instance.primary_node, bridges):
    raise errors.OpPrereqError("one or more target bridges %s does not"
                               " exist on destination node '%s'" %
                               (bridges, instance.primary_node))
457 bf6929a2 Alexander Schreiber
458 bf6929a2 Alexander Schreiber
459 a8083063 Iustin Pop
class LUDestroyCluster(NoHooksLU):
  """Logical unit for destroying the cluster.

  """
  _OP_REQP = []

  def CheckPrereq(self):
    """Check prerequisites.

    This checks whether the cluster is empty.

    Any errors are signalled by raising errors.OpPrereqError.

    """
    master = self.sstore.GetMasterNode()

    # the cluster may only contain the master node itself...
    nodes = self.cfg.GetNodeList()
    if len(nodes) != 1 or nodes[0] != master:
      raise errors.OpPrereqError("There are still %d node(s) in"
                                 " this cluster." % (len(nodes) - 1))
    # ...and no instances at all
    instances = self.cfg.GetInstanceList()
    if instances:
      raise errors.OpPrereqError("There are still %d instance(s) in"
                                 " this cluster." % len(instances))

  def Exec(self, feedback_fn):
    """Destroys the cluster.

    Stops the master role on the master node, backs up the cluster ssh
    keys and returns the master node name to the caller.

    """
    master = self.sstore.GetMasterNode()
    if not rpc.call_node_stop_master(master, False):
      raise errors.OpExecError("Could not disable the master role")
    priv_key, pub_key, _ = ssh.GetUserFiles(constants.GANETI_RUNAS)
    utils.CreateBackup(priv_key)
    utils.CreateBackup(pub_key)
    return master
495 a8083063 Iustin Pop
496 a8083063 Iustin Pop
497 d8fff41c Guido Trotter
class LUVerifyCluster(LogicalUnit):
  """Verifies the cluster status.

  """
  HPATH = "cluster-verify"
  HTYPE = constants.HTYPE_CLUSTER
  _OP_REQP = ["skip_checks"]
  REQ_BGL = False

  def ExpandNames(self):
    """Lock all nodes and instances, in shared mode."""
    self.needed_locks = {
      locking.LEVEL_NODE: locking.ALL_SET,
      locking.LEVEL_INSTANCE: locking.ALL_SET,
    }
    self.share_locks = dict((level, 1) for level in locking.LEVELS)
  def _VerifyNode(self, node, file_list, local_cksum, vglist, node_result,
                  remote_version, feedback_fn):
    """Run multiple tests against a node.

    Test list:
      - compares ganeti version
      - checks vg existence and size > 20G
      - checks config file checksum
      - checks ssh to other nodes

    Args:
      node: name of the node to check
      file_list: required list of files
      local_cksum: dictionary of local files and their checksums
      vglist: volume group data as returned by the node
      node_result: the results of the 'node_verify' rpc call for this node
      remote_version: the node's reported protocol version
      feedback_fn: function used to relay status messages to the caller

    Returns:
      True if any problem was detected on the node, False otherwise.

    """
    # compares ganeti version
    local_version = constants.PROTOCOL_VERSION
    if not remote_version:
      feedback_fn("  - ERROR: connection to %s failed" % (node))
      return True

    if local_version != remote_version:
      feedback_fn("  - ERROR: sw version mismatch: master %s, node(%s) %s" %
                      (local_version, node, remote_version))
      return True

    # checks vg existence and size > 20G

    bad = False
    if not vglist:
      feedback_fn("  - ERROR: unable to check volume groups on node %s." %
                      (node,))
      bad = True
    else:
      vgstatus = utils.CheckVolumeGroupSize(vglist, self.cfg.GetVGName(),
                                            constants.MIN_VG_SIZE)
      if vgstatus:
        feedback_fn("  - ERROR: %s on node %s" % (vgstatus, node))
        bad = True

    # checks config file checksum
    # checks ssh to any

    if 'filelist' not in node_result:
      bad = True
      feedback_fn("  - ERROR: node hasn't returned file checksum data")
    else:
      remote_cksum = node_result['filelist']
      for file_name in file_list:
        if file_name not in remote_cksum:
          bad = True
          feedback_fn("  - ERROR: file '%s' missing" % file_name)
        elif remote_cksum[file_name] != local_cksum[file_name]:
          bad = True
          feedback_fn("  - ERROR: file '%s' has wrong checksum" % file_name)

    if 'nodelist' not in node_result:
      bad = True
      feedback_fn("  - ERROR: node hasn't returned node ssh connectivity data")
    else:
      if node_result['nodelist']:
        bad = True
        # use a dedicated loop variable; the previous code reused 'node',
        # silently clobbering the name of the node under verification
        for failed_node in node_result['nodelist']:
          feedback_fn("  - ERROR: ssh communication with node '%s': %s" %
                          (failed_node, node_result['nodelist'][failed_node]))
    if 'node-net-test' not in node_result:
      bad = True
      feedback_fn("  - ERROR: node hasn't returned node tcp connectivity data")
    else:
      if node_result['node-net-test']:
        bad = True
        nlist = utils.NiceSort(node_result['node-net-test'].keys())
        for failed_node in nlist:
          feedback_fn("  - ERROR: tcp communication with node '%s': %s" %
                          (failed_node,
                           node_result['node-net-test'][failed_node]))

    hyp_result = node_result.get('hypervisor', None)
    if hyp_result is not None:
      # a hypervisor verify failure is an error; previously the message was
      # emitted but the node was not flagged as bad
      bad = True
      feedback_fn("  - ERROR: hypervisor verify failure: '%s'" % hyp_result)
    return bad
  def _VerifyInstance(self, instance, instanceconfig, node_vol_is,
596 c5705f58 Guido Trotter
                      node_instance, feedback_fn):
597 a8083063 Iustin Pop
    """Verify an instance.
598 a8083063 Iustin Pop

599 a8083063 Iustin Pop
    This function checks to see if the required block devices are
600 a8083063 Iustin Pop
    available on the instance's node.
601 a8083063 Iustin Pop

602 a8083063 Iustin Pop
    """
603 a8083063 Iustin Pop
    bad = False
604 a8083063 Iustin Pop
605 a8083063 Iustin Pop
    node_current = instanceconfig.primary_node
606 a8083063 Iustin Pop
607 a8083063 Iustin Pop
    node_vol_should = {}
608 a8083063 Iustin Pop
    instanceconfig.MapLVsByNode(node_vol_should)
609 a8083063 Iustin Pop
610 a8083063 Iustin Pop
    for node in node_vol_should:
611 a8083063 Iustin Pop
      for volume in node_vol_should[node]:
612 a8083063 Iustin Pop
        if node not in node_vol_is or volume not in node_vol_is[node]:
613 a8083063 Iustin Pop
          feedback_fn("  - ERROR: volume %s missing on node %s" %
614 a8083063 Iustin Pop
                          (volume, node))
615 a8083063 Iustin Pop
          bad = True
616 a8083063 Iustin Pop
617 a8083063 Iustin Pop
    if not instanceconfig.status == 'down':
618 a872dae6 Guido Trotter
      if (node_current not in node_instance or
619 a872dae6 Guido Trotter
          not instance in node_instance[node_current]):
620 a8083063 Iustin Pop
        feedback_fn("  - ERROR: instance %s not running on node %s" %
621 a8083063 Iustin Pop
                        (instance, node_current))
622 a8083063 Iustin Pop
        bad = True
623 a8083063 Iustin Pop
624 a8083063 Iustin Pop
    for node in node_instance:
625 a8083063 Iustin Pop
      if (not node == node_current):
626 a8083063 Iustin Pop
        if instance in node_instance[node]:
627 a8083063 Iustin Pop
          feedback_fn("  - ERROR: instance %s should not run on node %s" %
628 a8083063 Iustin Pop
                          (instance, node))
629 a8083063 Iustin Pop
          bad = True
630 a8083063 Iustin Pop
631 6a438c98 Michael Hanselmann
    return bad
632 a8083063 Iustin Pop
633 a8083063 Iustin Pop
  def _VerifyOrphanVolumes(self, node_vol_should, node_vol_is, feedback_fn):
634 a8083063 Iustin Pop
    """Verify if there are any unknown volumes in the cluster.
635 a8083063 Iustin Pop

636 a8083063 Iustin Pop
    The .os, .swap and backup volumes are ignored. All other volumes are
637 a8083063 Iustin Pop
    reported as unknown.
638 a8083063 Iustin Pop

639 a8083063 Iustin Pop
    """
640 a8083063 Iustin Pop
    bad = False
641 a8083063 Iustin Pop
642 a8083063 Iustin Pop
    for node in node_vol_is:
643 a8083063 Iustin Pop
      for volume in node_vol_is[node]:
644 a8083063 Iustin Pop
        if node not in node_vol_should or volume not in node_vol_should[node]:
645 a8083063 Iustin Pop
          feedback_fn("  - ERROR: volume %s on node %s should not exist" %
646 a8083063 Iustin Pop
                      (volume, node))
647 a8083063 Iustin Pop
          bad = True
648 a8083063 Iustin Pop
    return bad
649 a8083063 Iustin Pop
650 a8083063 Iustin Pop
  def _VerifyOrphanInstances(self, instancelist, node_instance, feedback_fn):
651 a8083063 Iustin Pop
    """Verify the list of running instances.
652 a8083063 Iustin Pop

653 a8083063 Iustin Pop
    This checks what instances are running but unknown to the cluster.
654 a8083063 Iustin Pop

655 a8083063 Iustin Pop
    """
656 a8083063 Iustin Pop
    bad = False
657 a8083063 Iustin Pop
    for node in node_instance:
658 a8083063 Iustin Pop
      for runninginstance in node_instance[node]:
659 a8083063 Iustin Pop
        if runninginstance not in instancelist:
660 a8083063 Iustin Pop
          feedback_fn("  - ERROR: instance %s on node %s should not exist" %
661 a8083063 Iustin Pop
                          (runninginstance, node))
662 a8083063 Iustin Pop
          bad = True
663 a8083063 Iustin Pop
    return bad
664 a8083063 Iustin Pop
665 2b3b6ddd Guido Trotter
  def _VerifyNPlusOneMemory(self, node_info, instance_cfg, feedback_fn):
666 2b3b6ddd Guido Trotter
    """Verify N+1 Memory Resilience.
667 2b3b6ddd Guido Trotter

668 2b3b6ddd Guido Trotter
    Check that if one single node dies we can still start all the instances it
669 2b3b6ddd Guido Trotter
    was primary for.
670 2b3b6ddd Guido Trotter

671 2b3b6ddd Guido Trotter
    """
672 2b3b6ddd Guido Trotter
    bad = False
673 2b3b6ddd Guido Trotter
674 2b3b6ddd Guido Trotter
    for node, nodeinfo in node_info.iteritems():
675 2b3b6ddd Guido Trotter
      # This code checks that every node which is now listed as secondary has
676 2b3b6ddd Guido Trotter
      # enough memory to host all instances it is supposed to should a single
677 2b3b6ddd Guido Trotter
      # other node in the cluster fail.
678 2b3b6ddd Guido Trotter
      # FIXME: not ready for failover to an arbitrary node
679 2b3b6ddd Guido Trotter
      # FIXME: does not support file-backed instances
680 2b3b6ddd Guido Trotter
      # WARNING: we currently take into account down instances as well as up
681 2b3b6ddd Guido Trotter
      # ones, considering that even if they're down someone might want to start
682 2b3b6ddd Guido Trotter
      # them even in the event of a node failure.
683 2b3b6ddd Guido Trotter
      for prinode, instances in nodeinfo['sinst-by-pnode'].iteritems():
684 2b3b6ddd Guido Trotter
        needed_mem = 0
685 2b3b6ddd Guido Trotter
        for instance in instances:
686 2b3b6ddd Guido Trotter
          needed_mem += instance_cfg[instance].memory
687 2b3b6ddd Guido Trotter
        if nodeinfo['mfree'] < needed_mem:
688 2b3b6ddd Guido Trotter
          feedback_fn("  - ERROR: not enough memory on node %s to accomodate"
689 2b3b6ddd Guido Trotter
                      " failovers should node %s fail" % (node, prinode))
690 2b3b6ddd Guido Trotter
          bad = True
691 2b3b6ddd Guido Trotter
    return bad
692 2b3b6ddd Guido Trotter
693 a8083063 Iustin Pop
  def CheckPrereq(self):
    """Check prerequisites.

    Transform the list of checks we're going to skip into a set and check that
    all its members are valid.

    """
    self.skip_set = frozenset(self.op.skip_checks)
    # every requested skip must name a known optional check
    if not self.skip_set.issubset(constants.VERIFY_OPTIONAL_CHECKS):
      raise errors.OpPrereqError("Invalid checks to be skipped specified")
  def BuildHooksEnv(self):
    """Build hooks env.

    Cluster-Verify hooks are run only in the post phase; their failure is
    logged in the verify output and makes the verification fail.

    """
    # TODO: populate the environment with useful information for verify hooks
    env = {}
    return env, [], self.cfg.GetNodeList()
  def Exec(self, feedback_fn):
    """Verify integrity of cluster, performing various test on nodes.

    Gathers volume, instance, version and connectivity data from all
    nodes via rpc, cross-checks it against the configuration and reports
    problems through feedback_fn.

    Returns True when no errors were found (note: 'return not bad').

    """
    bad = False
    feedback_fn("* Verifying global settings")
    for msg in self.cfg.VerifyConfig():
      feedback_fn("  - ERROR: %s" % msg)

    vg_name = self.cfg.GetVGName()
    nodelist = utils.NiceSort(self.cfg.GetNodeList())
    # nodeinfo here is the list of node *objects*; see NOTE below about
    # this name being rebound inside the per-node loop
    nodeinfo = [self.cfg.GetNodeInfo(nname) for nname in nodelist]
    instancelist = utils.NiceSort(self.cfg.GetInstanceList())
    i_non_redundant = [] # Non redundant instances
    node_volume = {}
    node_instance = {}
    node_info = {}
    instance_cfg = {}

    # FIXME: verify OS list
    # do local checksums
    file_names = list(self.sstore.GetFileList())
    file_names.append(constants.SSL_CERT_FILE)
    file_names.append(constants.CLUSTER_CONF_FILE)
    local_checksums = utils.FingerprintFiles(file_names)

    # gather all remote data in a batch of rpc calls before verifying
    feedback_fn("* Gathering data (%d nodes)" % len(nodelist))
    all_volumeinfo = rpc.call_volume_list(nodelist, vg_name)
    all_instanceinfo = rpc.call_instance_list(nodelist)
    all_vglist = rpc.call_vg_list(nodelist)
    node_verify_param = {
      'filelist': file_names,
      'nodelist': nodelist,
      'hypervisor': None,
      'node-net-test': [(node.name, node.primary_ip, node.secondary_ip)
                        for node in nodeinfo]
      }
    all_nvinfo = rpc.call_node_verify(nodelist, node_verify_param)
    all_rversion = rpc.call_version(nodelist)
    all_ninfo = rpc.call_node_info(nodelist, self.cfg.GetVGName())

    for node in nodelist:
      feedback_fn("* Verifying node %s" % node)
      result = self._VerifyNode(node, file_names, local_checksums,
                                all_vglist[node], all_nvinfo[node],
                                all_rversion[node], feedback_fn)
      bad = bad or result

      # node_volume
      volumeinfo = all_volumeinfo[node]

      if isinstance(volumeinfo, basestring):
        # a string result means the node returned an LVM error message
        feedback_fn("  - ERROR: LVM problem on node %s: %s" %
                    (node, volumeinfo[-400:].encode('string_escape')))
        bad = True
        node_volume[node] = {}
      elif not isinstance(volumeinfo, dict):
        feedback_fn("  - ERROR: connection to %s failed" % (node,))
        bad = True
        continue
      else:
        node_volume[node] = volumeinfo

      # node_instance
      nodeinstance = all_instanceinfo[node]
      if type(nodeinstance) != list:
        feedback_fn("  - ERROR: connection to %s failed" % (node,))
        bad = True
        continue

      node_instance[node] = nodeinstance

      # node_info
      # NOTE: this rebinds the outer 'nodeinfo' (list of node objects);
      # that list is no longer needed past this point, but the reuse of
      # the name is confusing
      nodeinfo = all_ninfo[node]
      if not isinstance(nodeinfo, dict):
        feedback_fn("  - ERROR: connection to %s failed" % (node,))
        bad = True
        continue

      try:
        node_info[node] = {
          "mfree": int(nodeinfo['memory_free']),
          "dfree": int(nodeinfo['vg_free']),
          "pinst": [],
          "sinst": [],
          # dictionary holding all instances this node is secondary for,
          # grouped by their primary node. Each key is a cluster node, and each
          # value is a list of instances which have the key as primary and the
          # current node as secondary.  this is handy to calculate N+1 memory
          # availability if you can only failover from a primary to its
          # secondary.
          "sinst-by-pnode": {},
        }
      except ValueError:
        # memory_free/vg_free were not parseable as integers
        feedback_fn("  - ERROR: invalid value returned from node %s" % (node,))
        bad = True
        continue

    node_vol_should = {}

    for instance in instancelist:
      feedback_fn("* Verifying instance %s" % instance)
      inst_config = self.cfg.GetInstanceInfo(instance)
      result =  self._VerifyInstance(instance, inst_config, node_volume,
                                     node_instance, feedback_fn)
      bad = bad or result

      inst_config.MapLVsByNode(node_vol_should)

      instance_cfg[instance] = inst_config

      pnode = inst_config.primary_node
      if pnode in node_info:
        node_info[pnode]['pinst'].append(instance)
      else:
        feedback_fn("  - ERROR: instance %s, connection to primary node"
                    " %s failed" % (instance, pnode))
        bad = True

      # If the instance is non-redundant we cannot survive losing its primary
      # node, so we are not N+1 compliant. On the other hand we have no disk
      # templates with more than one secondary so that situation is not well
      # supported either.
      # FIXME: does not support file-backed instances
      if len(inst_config.secondary_nodes) == 0:
        i_non_redundant.append(instance)
      elif len(inst_config.secondary_nodes) > 1:
        feedback_fn("  - WARNING: multiple secondaries for instance %s"
                    % instance)

      for snode in inst_config.secondary_nodes:
        if snode in node_info:
          node_info[snode]['sinst'].append(instance)
          if pnode not in node_info[snode]['sinst-by-pnode']:
            node_info[snode]['sinst-by-pnode'][pnode] = []
          node_info[snode]['sinst-by-pnode'][pnode].append(instance)
        else:
          feedback_fn("  - ERROR: instance %s, connection to secondary node"
                      " %s failed" % (instance, snode))

    feedback_fn("* Verifying orphan volumes")
    result = self._VerifyOrphanVolumes(node_vol_should, node_volume,
                                       feedback_fn)
    bad = bad or result

    feedback_fn("* Verifying remaining instances")
    result = self._VerifyOrphanInstances(instancelist, node_instance,
                                         feedback_fn)
    bad = bad or result

    if constants.VERIFY_NPLUSONE_MEM not in self.skip_set:
      feedback_fn("* Verifying N+1 Memory redundancy")
      result = self._VerifyNPlusOneMemory(node_info, instance_cfg, feedback_fn)
      bad = bad or result

    feedback_fn("* Other Notes")
    if i_non_redundant:
      feedback_fn("  - NOTICE: %d non-redundant instance(s) found."
                  % len(i_non_redundant))

    return not bad
  def HooksCallBack(self, phase, hooks_results, feedback_fn, lu_result):
    """Analyze the post-hooks' result, handle it, and send some
    nicely-formatted feedback back to the user.

    Args:
      phase: the hooks phase that has just been run
      hooks_results: the results of the multi-node hooks rpc call
      feedback_fn: function to send feedback back to the caller
      lu_result: previous Exec result

    Returns:
      The (possibly updated to 1) lu_result for the post phase; note
      that for any other phase the function falls through and implicitly
      returns None.

    """
    # We only really run POST phase hooks, and are only interested in
    # their results
    if phase == constants.HOOKS_PHASE_POST:
      # Used to change hooks' output to proper indentation
      indent_re = re.compile('^', re.M)
      feedback_fn("* Hooks Results")
      if not hooks_results:
        feedback_fn("  - ERROR: general communication failure")
        lu_result = 1
      else:
        for node_name in hooks_results:
          show_node_header = True
          res = hooks_results[node_name]
          # a non-list (or False) result means the rpc to that node failed
          if res is False or not isinstance(res, list):
            feedback_fn("    Communication failure")
            lu_result = 1
            continue
          for script, hkr, output in res:
            if hkr == constants.HKR_FAIL:
              # The node header is only shown once, if there are
              # failing hooks on that node
              if show_node_header:
                feedback_fn("  Node %s:" % node_name)
                show_node_header = False
              feedback_fn("    ERROR: Script %s failed, output:" % script)
              # indent the hook's output so it nests under the header
              output = indent_re.sub('      ', output)
              feedback_fn("%s" % output)
              lu_result = 1

      return lu_result
class LUVerifyDisks(NoHooksLU):
  """Verifies the cluster disks status.

  """
  _OP_REQP = []
  REQ_BGL = False

  def ExpandNames(self):
    """Lock all nodes and instances, in shared mode."""
    self.needed_locks = {
      locking.LEVEL_NODE: locking.ALL_SET,
      locking.LEVEL_INSTANCE: locking.ALL_SET,
    }
    self.share_locks = dict(((i, 1) for i in locking.LEVELS))

  def CheckPrereq(self):
    """Check prerequisites.

    This has no prerequisites.

    """
    pass

  def Exec(self, feedback_fn):
    """Verify integrity of cluster disks.

    Returns a tuple of four elements:
      - list of nodes that could not be contacted
      - dict of per-node LVM enumeration errors (node name -> message)
      - list of instances with at least one offline logical volume
      - dict of instances with missing logical volumes
        (instance name -> list of (node, volume) pairs)

    """
    result = res_nodes, res_nlvm, res_instances, res_missing = [], {}, [], {}

    vg_name = self.cfg.GetVGName()
    nodes = utils.NiceSort(self.cfg.GetNodeList())
    instances = [self.cfg.GetInstanceInfo(name)
                 for name in self.cfg.GetInstanceList()]

    # build the expected (node, volume) -> instance mapping; only
    # running, network-mirrored instances are of interest here
    nv_dict = {}
    for inst in instances:
      inst_lvs = {}
      if (inst.status != "up" or
          inst.disk_template not in constants.DTS_NET_MIRROR):
        continue
      inst.MapLVsByNode(inst_lvs)
      # transform { iname: {node: [vol,],},} to {(node, vol): iname}
      for node, vol_list in inst_lvs.iteritems():
        for vol in vol_list:
          nv_dict[(node, vol)] = inst

    if not nv_dict:
      return result

    node_lvs = rpc.call_volume_list(nodes, vg_name)

    for node in nodes:
      # node_volume
      lvs = node_lvs[node]

      if isinstance(lvs, basestring):
        # a string result is an LVM error message, not volume data; we
        # must skip this node (previously the code fell through and
        # crashed calling iteritems() on the string)
        logger.Info("error enumerating LVs on node %s: %s" % (node, lvs))
        res_nlvm[node] = lvs
        continue
      elif not isinstance(lvs, dict):
        logger.Info("connection to node %s failed or invalid data returned" %
                    (node,))
        res_nodes.append(node)
        continue

      for lv_name, (_, lv_inactive, lv_online) in lvs.iteritems():
        inst = nv_dict.pop((node, lv_name), None)
        if (not lv_online and inst is not None
            and inst.name not in res_instances):
          res_instances.append(inst.name)

    # any leftover items in nv_dict are missing LVs, let's arrange the
    # data better
    for key, inst in nv_dict.iteritems():
      if inst.name not in res_missing:
        res_missing[inst.name] = []
      res_missing[inst.name].append(key)

    return result
class LURenameCluster(LogicalUnit):
  """Rename the cluster.

  """
  HPATH = "cluster-rename"
  HTYPE = constants.HTYPE_CLUSTER
  _OP_REQP = ["name"]
  REQ_WSSTORE = True

  def BuildHooksEnv(self):
    """Build hooks env.

    """
    master = self.sstore.GetMasterNode()
    env = {
      "OP_TARGET": self.sstore.GetClusterName(),
      "NEW_NAME": self.op.name,
      }
    return env, [master], [master]

  def CheckPrereq(self):
    """Verify that the passed name is a valid one.

    """
    hostinfo = utils.HostInfo(self.op.name)

    old_name = self.sstore.GetClusterName()
    old_ip = self.sstore.GetMasterIP()
    self.ip = new_ip = hostinfo.ip
    if hostinfo.name == old_name and new_ip == old_ip:
      raise errors.OpPrereqError("Neither the name nor the IP address of the"
                                 " cluster has changed")
    if (new_ip != old_ip and
        utils.TcpPing(new_ip, constants.DEFAULT_NODED_PORT)):
      # something already answers on the new IP, refuse to take it over
      raise errors.OpPrereqError("The given cluster IP address (%s) is"
                                 " reachable on the network. Aborting." %
                                 new_ip)

    self.op.name = hostinfo.name

  def Exec(self, feedback_fn):
    """Rename the cluster.

    """
    new_name = self.op.name
    new_ip = self.ip
    ss = self.sstore

    # take the master role down before touching the configuration
    master = ss.GetMasterNode()
    if not rpc.call_node_stop_master(master, False):
      raise errors.OpExecError("Could not disable the master role")

    try:
      # update name and IP in the ssconf store
      ss.SetKey(ss.SS_MASTER_IP, new_ip)
      ss.SetKey(ss.SS_CLUSTER_NAME, new_name)

      # push the changed ssconf files to all nodes except ourselves
      myself = self.cfg.GetNodeInfo(master)
      dist_nodes = self.cfg.GetNodeList()
      if myself.name in dist_nodes:
        dist_nodes.remove(myself.name)

      logger.Debug("Copying updated ssconf data to all nodes")
      for keyname in (ss.SS_CLUSTER_NAME, ss.SS_MASTER_IP):
        fname = ss.KeyToFilename(keyname)
        upload_result = rpc.call_upload_file(dist_nodes, fname)
        for to_node in dist_nodes:
          if not upload_result[to_node]:
            logger.Error("copy of file %s to node %s failed" %
                         (fname, to_node))
    finally:
      # whatever happened above, try to bring the master role back up
      if not rpc.call_node_start_master(master, False):
        logger.Error("Could not re-enable the master role on the master,"
                     " please restart manually.")
1078 07bd8a51 Iustin Pop
1079 07bd8a51 Iustin Pop
1080 8084f9f6 Manuel Franceschini
def _RecursiveCheckIfLVMBased(disk):
  """Check if the given disk or its children are lvm-based.

  Args:
    disk: ganeti.objects.Disk object

  Returns:
    boolean indicating whether a LD_LV dev_type was found or not

  """
  # a plain logical volume at this level decides the question immediately
  if disk.dev_type == constants.LD_LV:
    return True
  # otherwise descend into the children, if any
  if disk.children:
    for child in disk.children:
      if _RecursiveCheckIfLVMBased(child):
        return True
  return False
1095 8084f9f6 Manuel Franceschini
1096 8084f9f6 Manuel Franceschini
1097 8084f9f6 Manuel Franceschini
class LUSetClusterParams(LogicalUnit):
  """Change the parameters of the cluster.

  """
  HPATH = "cluster-modify"
  HTYPE = constants.HTYPE_CLUSTER
  _OP_REQP = []
  REQ_BGL = False

  def ExpandNames(self):
    # FIXME: in the future maybe other cluster params won't require checking on
    # all nodes to be modified.
    self.needed_locks = {
      locking.LEVEL_NODE: locking.ALL_SET,
    }
    self.share_locks[locking.LEVEL_NODE] = 1

  def BuildHooksEnv(self):
    """Build hooks env.

    """
    master = self.sstore.GetMasterNode()
    env = {
      "OP_TARGET": self.sstore.GetClusterName(),
      "NEW_VG_NAME": self.op.vg_name,
      }
    return env, [master], [master]

  def CheckPrereq(self):
    """Check prerequisites.

    This checks whether the given params don't conflict and
    if the given volume group is valid.

    """
    # FIXME: This only works because there is only one parameter that can be
    # changed or removed.
    if not self.op.vg_name:
      # disabling lvm: refuse if any instance still uses lvm-based disks
      for inst in self.cfg.GetAllInstancesInfo().values():
        for disk in inst.disks:
          if _RecursiveCheckIfLVMBased(disk):
            raise errors.OpPrereqError("Cannot disable lvm storage while"
                                       " lvm-based instances exist")
    else:
      # a volume group was given: verify it exists and is big enough on
      # every locked node
      node_list = self.acquired_locks[locking.LEVEL_NODE]
      vglist = rpc.call_vg_list(node_list)
      for node in node_list:
        vgstatus = utils.CheckVolumeGroupSize(vglist[node], self.op.vg_name,
                                              constants.MIN_VG_SIZE)
        if vgstatus:
          raise errors.OpPrereqError("Error on node '%s': %s" %
                                     (node, vgstatus))

  def Exec(self, feedback_fn):
    """Change the parameters of the cluster.

    """
    if self.op.vg_name == self.cfg.GetVGName():
      feedback_fn("Cluster LVM configuration already in desired"
                  " state, not changing")
    else:
      self.cfg.SetVGName(self.op.vg_name)
1163 8084f9f6 Manuel Franceschini
1164 5bfac263 Iustin Pop
def _WaitForSync(cfgw, instance, proc, oneshot=False, unlock=False):
  """Sleep and poll for an instance's disk to sync.

  Args:
    cfgw: configuration object, used to set the disks' physical IDs
    instance: the instance whose disks are polled on its primary node
    proc: processor object, used for user feedback (LogInfo/LogWarning)
    oneshot: if True, report the current status once instead of waiting
    unlock: unused here, kept for interface compatibility

  Returns:
    True if the disks are in sync (no cumulative degradation seen on the
    last poll), False otherwise

  Raises:
    errors.RemoteError: if the primary node gives no data 10 times in a row

  """
  if not instance.disks:
    # nothing to sync
    return True

  if not oneshot:
    proc.LogInfo("Waiting for instance %s to sync disks." % instance.name)

  node = instance.primary_node

  for dev in instance.disks:
    cfgw.SetDiskID(dev, node)

  retries = 0
  while True:
    max_time = 0
    done = True
    cumul_degraded = False
    rstats = rpc.call_blockdev_getmirrorstatus(node, instance.disks)
    if not rstats:
      proc.LogWarning("Can't get any data from node %s" % node)
      retries += 1
      if retries >= 10:
        raise errors.RemoteError("Can't contact node %s for mirror data,"
                                 " aborting." % node)
      time.sleep(6)
      continue
    retries = 0
    # the status list is parallel to instance.disks, so walk them together
    # instead of indexing both by position
    for mstat, disk in zip(rstats, instance.disks):
      if mstat is None:
        proc.LogWarning("Can't compute data for node %s/%s" %
                        (node, disk.iv_name))
        continue
      # we ignore the ldisk parameter
      perc_done, est_time, is_degraded, _ = mstat
      cumul_degraded = cumul_degraded or (is_degraded and perc_done is None)
      if perc_done is not None:
        # a sync percentage means this device is still resyncing
        done = False
        if est_time is not None:
          rem_time = "%d estimated seconds remaining" % est_time
          max_time = est_time
        else:
          rem_time = "no time estimate"
        proc.LogInfo("- device %s: %5.2f%% done, %s" %
                     (disk.iv_name, perc_done, rem_time))
    if done or oneshot:
      break

    # sleep until roughly the estimated completion, but at most a minute
    time.sleep(min(60, max_time))

  if done:
    proc.LogInfo("Instance %s's disks are in sync." % instance.name)
  return not cumul_degraded
1220 a8083063 Iustin Pop
1221 a8083063 Iustin Pop
1222 0834c866 Iustin Pop
def _CheckDiskConsistency(cfgw, dev, node, on_primary, ldisk=False):
  """Check that mirrors are not degraded.

  The ldisk parameter, if True, will change the test from the
  is_degraded attribute (which represents overall non-ok status for
  the device(s)) to the ldisk (representing the local storage status).

  """
  cfgw.SetDiskID(dev, node)
  # position of the relevant flag in the blockdev_find result tuple
  if ldisk:
    status_idx = 6
  else:
    status_idx = 5

  result = True
  if on_primary or dev.AssembleOnSecondary():
    dev_status = rpc.call_blockdev_find(node, dev)
    if dev_status:
      result = not dev_status[status_idx]
    else:
      logger.ToStderr("Node %s: Disk degraded, not found or node down" % node)
      result = False
  # NOTE(review): ldisk is not forwarded to the recursive calls, so
  # children are always judged by is_degraded — confirm this is intended
  if dev.children:
    for child in dev.children:
      result = result and _CheckDiskConsistency(cfgw, child, node, on_primary)

  return result
1249 a8083063 Iustin Pop
1250 a8083063 Iustin Pop
1251 a8083063 Iustin Pop
class LUDiagnoseOS(NoHooksLU):
  """Logical unit for OS diagnose/query.

  """
  _OP_REQP = ["output_fields", "names"]
  REQ_BGL = False

  def ExpandNames(self):
    if self.op.names:
      raise errors.OpPrereqError("Selective OS query not supported")

    self.dynamic_fields = frozenset(["name", "valid", "node_status"])
    _CheckOutputFields(static=[],
                       dynamic=self.dynamic_fields,
                       selected=self.op.output_fields)

    # Lock all nodes, in shared mode
    self.share_locks[locking.LEVEL_NODE] = 1
    self.needed_locks = {locking.LEVEL_NODE: locking.ALL_SET}

  def CheckPrereq(self):
    """Check prerequisites.

    """

  @staticmethod
  def _DiagnoseByOS(node_list, rlist):
    """Remap a per-node result list into a per-os per-node dictionary.

      Args:
        node_list: a list with the names of all nodes
        rlist: a map with node names as keys and OS objects as values

      Returns:
        map: a map with osnames as keys and as value another map, with
             nodes as keys and list of OS objects as values
             e.g. {"debian-etch": {"node1": [<object>,...],
                                   "node2": [<object>,]}
                  }

    """
    all_os = {}
    for node_name, os_list in rlist.iteritems():
      if not os_list:
        # node returned nothing, skip it
        continue
      for os_obj in os_list:
        if os_obj.name not in all_os:
          # first occurrence of this OS: seed an empty list for every
          # node in node_list so absent nodes are represented too
          all_os[os_obj.name] = dict([(nname, []) for nname in node_list])
        all_os[os_obj.name][node_name].append(os_obj)
    return all_os

  def Exec(self, feedback_fn):
    """Compute the list of OSes.

    """
    node_list = self.acquired_locks[locking.LEVEL_NODE]
    node_data = rpc.call_os_diagnose(node_list)
    if node_data == False:
      raise errors.OpExecError("Can't gather the list of OSes")
    pol = self._DiagnoseByOS(node_list, node_data)
    output = []
    for os_name, os_data in pol.iteritems():
      row = []
      for field in self.op.output_fields:
        if field == "name":
          row.append(os_name)
        elif field == "valid":
          # valid iff every node returned a non-empty list whose first
          # entry evaluates as true
          row.append(utils.all([osl and osl[0] for osl in os_data.values()]))
        elif field == "node_status":
          status = {}
          for node_name, nos_list in os_data.iteritems():
            status[node_name] = [(v.status, v.path) for v in nos_list]
          row.append(status)
        else:
          raise errors.ParameterError(field)
      output.append(row)

    return output
1335 a8083063 Iustin Pop
1336 a8083063 Iustin Pop
1337 a8083063 Iustin Pop
class LURemoveNode(LogicalUnit):
  """Logical unit for removing a node.

  """
  HPATH = "node-remove"
  HTYPE = constants.HTYPE_NODE
  _OP_REQP = ["node_name"]

  def BuildHooksEnv(self):
    """Build hooks env.

    This doesn't run on the target node in the pre phase as a failed
    node would then be impossible to remove.

    """
    env = {
      "OP_TARGET": self.op.node_name,
      "NODE_NAME": self.op.node_name,
      }
    all_nodes = self.cfg.GetNodeList()
    all_nodes.remove(self.op.node_name)
    return env, all_nodes, all_nodes

  def CheckPrereq(self):
    """Check prerequisites.

    This checks:
     - the node exists in the configuration
     - it does not have primary or secondary instances
     - it's not the master

    Any errors are signalled by raising errors.OpPrereqError.

    """
    node = self.cfg.GetNodeInfo(self.cfg.ExpandNodeName(self.op.node_name))
    if node is None:
      # use the call-style raise, consistent with the rest of this module
      # (the old "raise Class, args" statement form is deprecated)
      raise errors.OpPrereqError("Node '%s' is unknown." % self.op.node_name)

    instance_list = self.cfg.GetInstanceList()

    masternode = self.sstore.GetMasterNode()
    if node.name == masternode:
      raise errors.OpPrereqError("Node is the master node,"
                                 " you need to failover first.")

    for instance_name in instance_list:
      instance = self.cfg.GetInstanceInfo(instance_name)
      if node.name == instance.primary_node:
        raise errors.OpPrereqError("Instance %s still running on the node,"
                                   " please remove first." % instance_name)
      if node.name in instance.secondary_nodes:
        raise errors.OpPrereqError("Instance %s has node as a secondary,"
                                   " please remove first." % instance_name)
    self.op.node_name = node.name
    self.node = node

  def Exec(self, feedback_fn):
    """Removes the node from the cluster.

    """
    node = self.node
    logger.Info("stopping the node daemon and removing configs from node %s" %
                node.name)

    # drop the node from the cluster configuration/context first, then
    # tell the node itself to leave
    self.context.RemoveNode(node.name)

    rpc.call_node_leave_cluster(node.name)
1404 c8a0948f Michael Hanselmann
1405 a8083063 Iustin Pop
1406 a8083063 Iustin Pop
class LUQueryNodes(NoHooksLU):
  """Logical unit for querying nodes.

  """
  _OP_REQP = ["output_fields", "names"]
  REQ_BGL = False

  def ExpandNames(self):
    # fields that require contacting the nodes
    self.dynamic_fields = frozenset([
      "dtotal", "dfree",
      "mtotal", "mnode", "mfree",
      "bootid",
      "ctotal",
      ])

    # fields answered purely from the configuration
    self.static_fields = frozenset([
      "name", "pinst_cnt", "sinst_cnt",
      "pinst_list", "sinst_list",
      "pip", "sip", "tags",
      ])

    _CheckOutputFields(static=self.static_fields,
                       dynamic=self.dynamic_fields,
                       selected=self.op.output_fields)

    self.needed_locks = {}
    self.share_locks[locking.LEVEL_NODE] = 1

    if self.op.names:
      self.wanted = _GetWantedNodes(self, self.op.names)
    else:
      self.wanted = locking.ALL_SET

    self.do_locking = not self.static_fields.issuperset(self.op.output_fields)
    if self.do_locking:
      # if we don't request only static fields, we need to lock the nodes
      self.needed_locks[locking.LEVEL_NODE] = self.wanted

  def CheckPrereq(self):
    """Check prerequisites.

    A non-empty node list was already validated by _GetWantedNodes in
    ExpandNames; an empty list needs no validation, so nothing is left
    to do here.

    """
    pass

  def Exec(self, feedback_fn):
    """Computes the list of nodes and their attributes.

    """
    all_info = self.cfg.GetAllNodesInfo()
    if self.do_locking:
      nodenames = self.acquired_locks[locking.LEVEL_NODE]
    else:
      nodenames = all_info.keys()
    nodelist = [all_info[name] for name in nodenames]

    # gather live data only if a dynamic field was requested
    if self.dynamic_fields.intersection(self.op.output_fields):
      live_data = {}
      node_data = rpc.call_node_info(nodenames, self.cfg.GetVGName())
      for name in nodenames:
        ninfo = node_data.get(name, None)
        if not ninfo:
          live_data[name] = {}
          continue
        live_data[name] = {
          "mtotal": utils.TryConvert(int, ninfo['memory_total']),
          "mnode": utils.TryConvert(int, ninfo['memory_dom0']),
          "mfree": utils.TryConvert(int, ninfo['memory_free']),
          "dtotal": utils.TryConvert(int, ninfo['vg_size']),
          "dfree": utils.TryConvert(int, ninfo['vg_free']),
          "ctotal": utils.TryConvert(int, ninfo['cpu_total']),
          "bootid": ninfo['bootid'],
          }
    else:
      live_data = dict.fromkeys(nodenames, {})

    # per-node sets of primary/secondary instance names
    node_to_primary = dict([(name, set()) for name in nodenames])
    node_to_secondary = dict([(name, set()) for name in nodenames])

    inst_fields = frozenset(("pinst_cnt", "pinst_list",
                             "sinst_cnt", "sinst_list"))
    if inst_fields & frozenset(self.op.output_fields):
      for instance_name in self.cfg.GetInstanceList():
        inst = self.cfg.GetInstanceInfo(instance_name)
        if inst.primary_node in node_to_primary:
          node_to_primary[inst.primary_node].add(inst.name)
        for secnode in inst.secondary_nodes:
          if secnode in node_to_secondary:
            node_to_secondary[secnode].add(inst.name)

    # build one output row per node, fields in the requested order
    output = []
    for node in nodelist:
      row = []
      for field in self.op.output_fields:
        if field == "name":
          val = node.name
        elif field == "pinst_list":
          val = list(node_to_primary[node.name])
        elif field == "sinst_list":
          val = list(node_to_secondary[node.name])
        elif field == "pinst_cnt":
          val = len(node_to_primary[node.name])
        elif field == "sinst_cnt":
          val = len(node_to_secondary[node.name])
        elif field == "pip":
          val = node.primary_ip
        elif field == "sip":
          val = node.secondary_ip
        elif field == "tags":
          val = list(node.GetTags())
        elif field in self.dynamic_fields:
          val = live_data[node.name].get(field, None)
        else:
          raise errors.ParameterError(field)
        row.append(val)
      output.append(row)

    return output
1532 a8083063 Iustin Pop
1533 a8083063 Iustin Pop
1534 dcb93971 Michael Hanselmann
class LUQueryNodeVolumes(NoHooksLU):
  """Logical unit for getting volumes on node(s).

  """
  _OP_REQP = ["nodes", "output_fields"]
  REQ_BGL = False

  def ExpandNames(self):
    _CheckOutputFields(static=["node"],
                       dynamic=["phys", "vg", "name", "size", "instance"],
                       selected=self.op.output_fields)

    self.needed_locks = {}
    # read-only query, so node locks can be shared
    self.share_locks[locking.LEVEL_NODE] = 1
    if self.op.nodes:
      self.needed_locks[locking.LEVEL_NODE] = \
        _GetWantedNodes(self, self.op.nodes)
    else:
      self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the fields required are valid output fields.

    """
    self.nodes = self.acquired_locks[locking.LEVEL_NODE]

  def Exec(self, feedback_fn):
    """Computes the list of nodes and their attributes.

    """
    wanted = self.nodes
    volumes = rpc.call_node_volumes(wanted)

    instances = [self.cfg.GetInstanceInfo(name) for name
                 in self.cfg.GetInstanceList()]

    # per-instance map of node -> logical volumes, used to find the
    # owner of each volume below
    lv_map = dict([(inst, inst.MapLVsByNode()) for inst in instances])

    output = []
    for node in wanted:
      # skip nodes which returned no data (or an empty volume list)
      if node not in volumes or not volumes[node]:
        continue

      node_vols = volumes[node][:]
      node_vols.sort(key=lambda vol: vol['dev'])

      for vol in node_vols:
        row = []
        for field in self.op.output_fields:
          if field == "node":
            val = node
          elif field == "phys":
            val = vol['dev']
          elif field == "vg":
            val = vol['vg']
          elif field == "name":
            val = vol['name']
          elif field == "size":
            val = int(float(vol['size']))
          elif field == "instance":
            # find which instance (if any) owns this logical volume
            for inst in instances:
              if node not in lv_map[inst]:
                continue
              if vol['name'] in lv_map[inst][node]:
                val = inst.name
                break
            else:
              val = '-'
          else:
            raise errors.ParameterError(field)
          row.append(str(val))

        output.append(row)

    return output
1611 dcb93971 Michael Hanselmann
1612 dcb93971 Michael Hanselmann
1613 a8083063 Iustin Pop
class LUAddNode(LogicalUnit):
  """Logical unit for adding node to the cluster.

  Also handles re-adding a node whose configuration was lost
  (``self.op.readd``), in which case the node must already be in the
  configuration and keep its previous IP addresses.

  """
  HPATH = "node-add"
  HTYPE = constants.HTYPE_NODE
  _OP_REQP = ["node_name"]

  def BuildHooksEnv(self):
    """Build hooks env.

    This will run on all nodes before, and on all nodes + the new node after.

    """
    env = {
      "OP_TARGET": self.op.node_name,
      "NODE_NAME": self.op.node_name,
      "NODE_PIP": self.op.primary_ip,
      "NODE_SIP": self.op.secondary_ip,
      }
    # pre-hooks run on the current node list, post-hooks also on the
    # node being added
    nodes_0 = self.cfg.GetNodeList()
    nodes_1 = nodes_0 + [self.op.node_name, ]
    return env, nodes_0, nodes_1

  def CheckPrereq(self):
    """Check prerequisites.

    This checks:
     - the new node is not already in the config
     - it is resolvable
     - its parameters (single/dual homed) matches the cluster

    Any errors are signalled by raising errors.OpPrereqError.

    """
    node_name = self.op.node_name
    cfg = self.cfg

    # resolve the node name; dns_data provides the canonical name and
    # primary IP used from here on
    dns_data = utils.HostInfo(node_name)

    node = dns_data.name
    primary_ip = self.op.primary_ip = dns_data.ip
    secondary_ip = getattr(self.op, "secondary_ip", None)
    if secondary_ip is None:
      # no secondary given: treat the node as single-homed
      secondary_ip = primary_ip
    if not utils.IsValidIP(secondary_ip):
      raise errors.OpPrereqError("Invalid secondary IP given")
    self.op.secondary_ip = secondary_ip

    # a plain add requires the node to be absent from the config, a
    # readd requires it to be present
    node_list = cfg.GetNodeList()
    if not self.op.readd and node in node_list:
      raise errors.OpPrereqError("Node %s is already in the configuration" %
                                 node)
    elif self.op.readd and node not in node_list:
      raise errors.OpPrereqError("Node %s is not in the configuration" % node)

    for existing_node_name in node_list:
      existing_node = cfg.GetNodeInfo(existing_node_name)

      if self.op.readd and node == existing_node_name:
        # a readded node must keep exactly the same IP configuration
        if (existing_node.primary_ip != primary_ip or
            existing_node.secondary_ip != secondary_ip):
          raise errors.OpPrereqError("Readded node doesn't have the same IP"
                                     " address configuration as before")
        continue

      # neither of the new node's IPs may collide with any existing node
      if (existing_node.primary_ip == primary_ip or
          existing_node.secondary_ip == primary_ip or
          existing_node.primary_ip == secondary_ip or
          existing_node.secondary_ip == secondary_ip):
        raise errors.OpPrereqError("New node ip address(es) conflict with"
                                   " existing node %s" % existing_node.name)

    # check that the type of the node (single versus dual homed) is the
    # same as for the master
    myself = cfg.GetNodeInfo(self.sstore.GetMasterNode())
    master_singlehomed = myself.secondary_ip == myself.primary_ip
    newbie_singlehomed = secondary_ip == primary_ip
    if master_singlehomed != newbie_singlehomed:
      if master_singlehomed:
        raise errors.OpPrereqError("The master has no private ip but the"
                                   " new node has one")
      else:
        raise errors.OpPrereqError("The master has a private ip but the"
                                   " new node doesn't have one")

    # checks reachablity
    if not utils.TcpPing(primary_ip, constants.DEFAULT_NODED_PORT):
      raise errors.OpPrereqError("Node not reachable by ping")

    if not newbie_singlehomed:
      # check reachability from my secondary ip to newbie's secondary ip
      if not utils.TcpPing(secondary_ip, constants.DEFAULT_NODED_PORT,
                           source=myself.secondary_ip):
        raise errors.OpPrereqError("Node secondary ip not reachable by TCP"
                                   " based ping to noded port")

    # the validated node object is consumed by Exec()
    self.new_node = objects.Node(name=node,
                                 primary_ip=primary_ip,
                                 secondary_ip=secondary_ip)

  def Exec(self, feedback_fn):
    """Adds the new node to the cluster.

    """
    new_node = self.new_node
    node = new_node.name

    # check connectivity
    result = rpc.call_version([node])[node]
    if result:
      if constants.PROTOCOL_VERSION == result:
        logger.Info("communication to node %s fine, sw version %s match" %
                    (node, result))
      else:
        raise errors.OpExecError("Version mismatch master version %s,"
                                 " node version %s" %
                                 (constants.PROTOCOL_VERSION, result))
    else:
      raise errors.OpExecError("Cannot get version from the new node")

    # setup ssh on node
    logger.Info("copy ssh key to node %s" % node)
    priv_key, pub_key, _ = ssh.GetUserFiles(constants.GANETI_RUNAS)
    keyarray = []
    # host DSA/RSA keypairs plus the cluster user's keypair, in the
    # exact order expected by the node_add RPC below
    keyfiles = [constants.SSH_HOST_DSA_PRIV, constants.SSH_HOST_DSA_PUB,
                constants.SSH_HOST_RSA_PRIV, constants.SSH_HOST_RSA_PUB,
                priv_key, pub_key]

    for i in keyfiles:
      f = open(i, 'r')
      try:
        keyarray.append(f.read())
      finally:
        f.close()

    result = rpc.call_node_add(node, keyarray[0], keyarray[1], keyarray[2],
                               keyarray[3], keyarray[4], keyarray[5])

    if not result:
      raise errors.OpExecError("Cannot transfer ssh keys to the new node")

    # Add node to our /etc/hosts, and add key to known_hosts
    utils.AddHostToEtcHosts(new_node.name)

    if new_node.secondary_ip != new_node.primary_ip:
      # ask the node itself whether it owns the secondary IP we were given
      if not rpc.call_node_tcp_ping(new_node.name,
                                    constants.LOCALHOST_IP_ADDRESS,
                                    new_node.secondary_ip,
                                    constants.DEFAULT_NODED_PORT,
                                    10, False):
        raise errors.OpExecError("Node claims it doesn't have the secondary ip"
                                 " you gave (%s). Please fix and re-run this"
                                 " command." % new_node.secondary_ip)

    # have the master verify ssh/hostname setup towards the new node
    node_verify_list = [self.sstore.GetMasterNode()]
    node_verify_param = {
      'nodelist': [node],
      # TODO: do a node-net-test as well?
    }

    result = rpc.call_node_verify(node_verify_list, node_verify_param)
    for verifier in node_verify_list:
      if not result[verifier]:
        raise errors.OpExecError("Cannot communicate with %s's node daemon"
                                 " for remote verification" % verifier)
      if result[verifier]['nodelist']:
        for failed in result[verifier]['nodelist']:
          feedback_fn("ssh/hostname verification failed %s -> %s" %
                      (verifier, result[verifier]['nodelist'][failed]))
        raise errors.OpExecError("ssh/hostname verification failed.")

    # Distribute updated /etc/hosts and known_hosts to all nodes,
    # including the node just added
    myself = self.cfg.GetNodeInfo(self.sstore.GetMasterNode())
    dist_nodes = self.cfg.GetNodeList()
    if not self.op.readd:
      dist_nodes.append(node)
    if myself.name in dist_nodes:
      # the master already has the up-to-date files
      dist_nodes.remove(myself.name)

    logger.Debug("Copying hosts and known_hosts to all nodes")
    for fname in (constants.ETC_HOSTS, constants.SSH_KNOWN_HOSTS_FILE):
      result = rpc.call_upload_file(dist_nodes, fname)
      for to_node in dist_nodes:
        if not result[to_node]:
          # copy failures are logged but deliberately not fatal
          logger.Error("copy of file %s to node %s failed" %
                       (fname, to_node))

    # push the ssconf files (and, for HVM clusters, the VNC password
    # file) to the new node only
    to_copy = self.sstore.GetFileList()
    if self.sstore.GetHypervisorType() == constants.HT_XEN_HVM31:
      to_copy.append(constants.VNC_PASSWORD_FILE)
    for fname in to_copy:
      result = rpc.call_upload_file([node], fname)
      if not result[node]:
        logger.Error("could not copy file %s to node %s" % (fname, node))

    # finally record the node in the cluster context/configuration
    if self.op.readd:
      self.context.ReaddNode(new_node)
    else:
      self.context.AddNode(new_node)
1814 a8083063 Iustin Pop
1815 a8083063 Iustin Pop
1816 a8083063 Iustin Pop
class LUQueryClusterInfo(NoHooksLU):
  """Query cluster configuration.

  """
  _OP_REQP = []
  REQ_MASTER = False
  REQ_BGL = False

  def ExpandNames(self):
    # purely informational query: no locks needed
    self.needed_locks = {}

  def CheckPrereq(self):
    """No prerequisites needed for this LU.

    """
    pass

  def Exec(self, feedback_fn):
    """Return cluster config.

    """
    return {
      "name": self.sstore.GetClusterName(),
      "software_version": constants.RELEASE_VERSION,
      "protocol_version": constants.PROTOCOL_VERSION,
      "config_version": constants.CONFIG_VERSION,
      "os_api_version": constants.OS_API_VERSION,
      "export_version": constants.EXPORT_VERSION,
      "master": self.sstore.GetMasterNode(),
      "architecture": (platform.architecture()[0], platform.machine()),
      "hypervisor_type": self.sstore.GetHypervisorType(),
      }
1850 a8083063 Iustin Pop
1851 a8083063 Iustin Pop
1852 a8083063 Iustin Pop
class LUDumpClusterConfig(NoHooksLU):
  """Return a text-representation of the cluster-config.

  """
  _OP_REQP = []
  REQ_BGL = False

  def ExpandNames(self):
    # read-only dump, no locking required
    self.needed_locks = {}

  def CheckPrereq(self):
    """No prerequisites.

    """
    pass

  def Exec(self, feedback_fn):
    """Dump a representation of the cluster config to the standard output.

    """
    return self.cfg.DumpConfig()
1873 a8083063 Iustin Pop
1874 a8083063 Iustin Pop
1875 a8083063 Iustin Pop
class LUActivateInstanceDisks(NoHooksLU):
  """Bring up an instance's disks.

  """
  _OP_REQP = ["instance_name"]
  REQ_BGL = False

  def ExpandNames(self):
    # lock the instance now; the node locks are filled in later by
    # DeclareLocks once the instance's nodes are known
    self._ExpandAndLockInstance()
    self.needed_locks[locking.LEVEL_NODE] = []
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE

  def DeclareLocks(self, level):
    if level == locking.LEVEL_NODE:
      self._LockInstancesNodes()

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster.

    """
    self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
    assert self.instance is not None, \
      "Cannot retrieve locked instance %s" % self.op.instance_name

  def Exec(self, feedback_fn):
    """Activate the disks.

    """
    ok, dev_info = _AssembleInstanceDisks(self.instance, self.cfg)
    if not ok:
      raise errors.OpExecError("Cannot activate block devices")
    return dev_info
1910 a8083063 Iustin Pop
1911 a8083063 Iustin Pop
1912 a8083063 Iustin Pop
def _AssembleInstanceDisks(instance, cfg, ignore_secondaries=False):
  """Prepare the block devices for an instance.

  This sets up the block devices on all nodes.

  Args:
    instance: a ganeti.objects.Instance object
    cfg: the cluster ConfigWriter, used to set disk IDs and record
         physical device information
    ignore_secondaries: if true, errors on secondary nodes won't result
                        in an error return from the function

  Returns:
    a tuple (disks_ok, device_info) where disks_ok is False if any
    (non-ignored) assembly failed, and device_info is a list of
    (primary_node, iv_name, assemble_result) tuples, one per disk

  """
  device_info = []
  disks_ok = True
  iname = instance.name
  # With the two passes mechanism we try to reduce the window of
  # opportunity for the race condition of switching DRBD to primary
  # before handshaking occured, but we do not eliminate it

  # The proper fix would be to wait (with some limits) until the
  # connection has been made and drbd transitions from WFConnection
  # into any other network-connected state (Connected, SyncTarget,
  # SyncSource, etc.)

  # 1st pass, assemble on all nodes in secondary mode
  for inst_disk in instance.disks:
    for node, node_disk in inst_disk.ComputeNodeTree(instance.primary_node):
      cfg.SetDiskID(node_disk, node)
      result = rpc.call_blockdev_assemble(node, node_disk, iname, False)
      if not result:
        logger.Error("could not prepare block device %s on node %s"
                     " (is_primary=False, pass=1)" % (inst_disk.iv_name, node))
        if not ignore_secondaries:
          disks_ok = False

  # FIXME: race condition on drbd migration to primary

  # 2nd pass, do only the primary node
  for inst_disk in instance.disks:
    for node, node_disk in inst_disk.ComputeNodeTree(instance.primary_node):
      if node != instance.primary_node:
        continue
      cfg.SetDiskID(node_disk, node)
      result = rpc.call_blockdev_assemble(node, node_disk, iname, True)
      if not result:
        logger.Error("could not prepare block device %s on node %s"
                     " (is_primary=True, pass=2)" % (inst_disk.iv_name, node))
        disks_ok = False
    # NOTE(review): 'result' is the value left over from the loop above;
    # this assumes the primary node always appears in ComputeNodeTree
    # for every disk — confirm against the Disk object implementation
    device_info.append((instance.primary_node, inst_disk.iv_name, result))

  # leave the disks configured for the primary node
  # this is a workaround that would be fixed better by
  # improving the logical/physical id handling
  for disk in instance.disks:
    cfg.SetDiskID(disk, instance.primary_node)

  return disks_ok, device_info
1972 a8083063 Iustin Pop
1973 a8083063 Iustin Pop
1974 fe7b0351 Michael Hanselmann
def _StartInstanceDisks(cfg, instance, force):
  """Start the disks of an instance.

  Assembles the instance's disks; on failure the disks are shut down
  again and OpExecError is raised.

  """
  assembled, _ = _AssembleInstanceDisks(instance, cfg,
                                        ignore_secondaries=force)
  if assembled:
    return
  # assembly failed: clean up whatever did come up before bailing out
  _ShutdownInstanceDisks(instance, cfg)
  if force is not None and not force:
    logger.Error("If the message above refers to a secondary node,"
                 " you can retry the operation using '--force'.")
  raise errors.OpExecError("Disk consistency error")
1986 fe7b0351 Michael Hanselmann
1987 fe7b0351 Michael Hanselmann
1988 a8083063 Iustin Pop
class LUDeactivateInstanceDisks(NoHooksLU):
  """Shutdown an instance's disks.

  """
  _OP_REQP = ["instance_name"]
  REQ_BGL = False

  def ExpandNames(self):
    # lock the instance now; the node locks are filled in later by
    # DeclareLocks once the instance's nodes are known
    self._ExpandAndLockInstance()
    self.needed_locks[locking.LEVEL_NODE] = []
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE

  def DeclareLocks(self, level):
    if level == locking.LEVEL_NODE:
      self._LockInstancesNodes()

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster.

    """
    self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
    assert self.instance is not None, \
      "Cannot retrieve locked instance %s" % self.op.instance_name

  def Exec(self, feedback_fn):
    """Deactivate the disks

    """
    _SafeShutdownInstanceDisks(self.instance, self.cfg)
2020 a8083063 Iustin Pop
2021 a8083063 Iustin Pop
2022 155d6c75 Guido Trotter
def _SafeShutdownInstanceDisks(instance, cfg):
  """Shutdown block devices of an instance.

  This function checks if an instance is running, before calling
  _ShutdownInstanceDisks.

  Raises OpExecError if the primary node cannot be contacted or if the
  instance is still running there.

  """
  ins_l = rpc.call_instance_list([instance.primary_node])
  ins_l = ins_l[instance.primary_node]
  # a non-list answer means the RPC failed for that node
  # (idiom fix: isinstance instead of the old "type(x) is list" test)
  if not isinstance(ins_l, list):
    raise errors.OpExecError("Can't contact node '%s'" %
                             instance.primary_node)

  if instance.name in ins_l:
    raise errors.OpExecError("Instance is running, can't shutdown"
                             " block devices.")

  _ShutdownInstanceDisks(instance, cfg)
2040 a8083063 Iustin Pop
2041 a8083063 Iustin Pop
2042 a8083063 Iustin Pop
def _ShutdownInstanceDisks(instance, cfg, ignore_primary=False):
  """Shutdown block devices of an instance.

  This does the shutdown on all nodes of the instance.

  A failed shutdown makes the function return False, except that
  failures on the instance's primary node are tolerated when
  ignore_primary is True.

  """
  success = True
  primary = instance.primary_node
  for disk in instance.disks:
    for node, top_disk in disk.ComputeNodeTree(primary):
      cfg.SetDiskID(top_disk, node)
      if rpc.call_blockdev_shutdown(node, top_disk):
        continue
      logger.Error("could not shutdown block device %s on node %s" %
                   (disk.iv_name, node))
      if not (ignore_primary and node == primary):
        success = False
  return success
2061 a8083063 Iustin Pop
2062 a8083063 Iustin Pop
2063 d4f16fd9 Iustin Pop
def _CheckNodeFreeMemory(cfg, node, reason, requested):
  """Checks if a node has enough free memory.

  This function checks if a given node has the needed amount of free
  memory. In case the node has less memory or we cannot get the
  information from the node, this function raises an OpPrereqError
  exception.

  Args:
    - cfg: a ConfigWriter instance
    - node: the node name
    - reason: string to use in the error message
    - requested: the amount of memory in MiB

  """
  nodeinfo = rpc.call_node_info([node], cfg.GetVGName())
  # also guard against a missing per-node entry: a dead node can be
  # absent from the result dict, and nodeinfo[node] below would then
  # raise a raw KeyError instead of a clean prerequisite error
  if not nodeinfo or not isinstance(nodeinfo, dict) or node not in nodeinfo:
    raise errors.OpPrereqError("Could not contact node %s for resource"
                               " information" % (node,))

  free_mem = nodeinfo[node].get('memory_free')
  if not isinstance(free_mem, int):
    raise errors.OpPrereqError("Can't compute free memory on node %s, result"
                               " was '%s'" % (node, free_mem))
  if requested > free_mem:
    raise errors.OpPrereqError("Not enough memory on node %s for %s:"
                               " needed %s MiB, available %s MiB" %
                               (node, reason, requested, free_mem))
2091 d4f16fd9 Iustin Pop
2092 d4f16fd9 Iustin Pop
2093 a8083063 Iustin Pop
class LUStartupInstance(LogicalUnit):
  """Starts an instance.

  """
  HPATH = "instance-start"
  HTYPE = constants.HTYPE_INSTANCE
  # opcode attributes that must be present
  _OP_REQP = ["instance_name", "force"]
  # this LU does its own fine-grained locking, no big ganeti lock needed
  REQ_BGL = False

  def ExpandNames(self):
    """Expand the instance name and declare the locks we need."""
    self._ExpandAndLockInstance()
    # the node locks are computed in DeclareLocks, once the instance
    # lock is held and its node list is known
    self.needed_locks[locking.LEVEL_NODE] = []
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE

  def DeclareLocks(self, level):
    """Acquire the node locks of the (already locked) instance."""
    if level == locking.LEVEL_NODE:
      self._LockInstancesNodes()

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and secondary nodes of the instance.

    """
    env = {
      "FORCE": self.op.force,
      }
    env.update(_BuildInstanceHookEnvByObject(self.instance))
    # hooks run on the master plus all of the instance's nodes
    nl = ([self.sstore.GetMasterNode(), self.instance.primary_node] +
          list(self.instance.secondary_nodes))
    return env, nl, nl

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster, that its network
    bridges exist and that the primary node has enough free memory.

    """
    self.instance = instance = self.cfg.GetInstanceInfo(self.op.instance_name)
    assert self.instance is not None, \
      "Cannot retrieve locked instance %s" % self.op.instance_name

    # check bridges existence
    _CheckInstanceBridgesExist(instance)

    _CheckNodeFreeMemory(self.cfg, instance.primary_node,
                         "starting instance %s" % instance.name,
                         instance.memory)

  def Exec(self, feedback_fn):
    """Start the instance.

    """
    instance = self.instance
    force = self.op.force
    # extra_args is optional on the opcode; presumably passed through to
    # the hypervisor start call -- TODO confirm against the rpc layer
    extra_args = getattr(self.op, "extra_args", "")

    # record the new admin state before attempting the actual start
    self.cfg.MarkInstanceUp(instance.name)

    node_current = instance.primary_node

    _StartInstanceDisks(self.cfg, instance, force)

    if not rpc.call_instance_start(node_current, instance, extra_args):
      # roll back the disk activation before reporting the failure
      _ShutdownInstanceDisks(instance, self.cfg)
      raise errors.OpExecError("Could not start instance")
2159 a8083063 Iustin Pop
2160 a8083063 Iustin Pop
2161 bf6929a2 Alexander Schreiber
class LURebootInstance(LogicalUnit):
  """Reboot an instance.

  """
  HPATH = "instance-reboot"
  HTYPE = constants.HTYPE_INSTANCE
  # opcode attributes that must be present
  _OP_REQP = ["instance_name", "ignore_secondaries", "reboot_type"]
  # this LU does its own fine-grained locking, no big ganeti lock needed
  REQ_BGL = False

  def ExpandNames(self):
    """Validate the reboot type and declare the locks we need."""
    if self.op.reboot_type not in [constants.INSTANCE_REBOOT_SOFT,
                                   constants.INSTANCE_REBOOT_HARD,
                                   constants.INSTANCE_REBOOT_FULL]:
      raise errors.ParameterError("reboot type not in [%s, %s, %s]" %
                                  (constants.INSTANCE_REBOOT_SOFT,
                                   constants.INSTANCE_REBOOT_HARD,
                                   constants.INSTANCE_REBOOT_FULL))
    self._ExpandAndLockInstance()
    # the node locks are computed in DeclareLocks, once the instance
    # lock is held and its node list is known
    self.needed_locks[locking.LEVEL_NODE] = []
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE

  def DeclareLocks(self, level):
    """Acquire the node locks of the (already locked) instance.

    Only a full reboot touches the disks on the secondary nodes, so for
    soft/hard reboots locking the primary node is enough.

    """
    if level == locking.LEVEL_NODE:
      # FIX: the previous code computed "not constants.INSTANCE_REBOOT_FULL",
      # which negates a non-empty constant and is therefore always False,
      # so all nodes were always locked; compare the requested reboot type
      # instead (it was validated in ExpandNames)
      primary_only = self.op.reboot_type != constants.INSTANCE_REBOOT_FULL
      self._LockInstancesNodes(primary_only=primary_only)

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and secondary nodes of the instance.

    """
    env = {
      "IGNORE_SECONDARIES": self.op.ignore_secondaries,
      }
    env.update(_BuildInstanceHookEnvByObject(self.instance))
    nl = ([self.sstore.GetMasterNode(), self.instance.primary_node] +
          list(self.instance.secondary_nodes))
    return env, nl, nl

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster.

    """
    self.instance = instance = self.cfg.GetInstanceInfo(self.op.instance_name)
    assert self.instance is not None, \
      "Cannot retrieve locked instance %s" % self.op.instance_name

    # check bridges existence
    _CheckInstanceBridgesExist(instance)

  def Exec(self, feedback_fn):
    """Reboot the instance.

    """
    instance = self.instance
    ignore_secondaries = self.op.ignore_secondaries
    reboot_type = self.op.reboot_type
    extra_args = getattr(self.op, "extra_args", "")

    node_current = instance.primary_node

    if reboot_type in [constants.INSTANCE_REBOOT_SOFT,
                       constants.INSTANCE_REBOOT_HARD]:
      # soft/hard reboots are delegated to the node as a single call
      if not rpc.call_instance_reboot(node_current, instance,
                                      reboot_type, extra_args):
        raise errors.OpExecError("Could not reboot instance")
    else:
      # full reboot: shut down the instance and its disks, then start anew
      if not rpc.call_instance_shutdown(node_current, instance):
        raise errors.OpExecError("could not shutdown instance for full reboot")
      _ShutdownInstanceDisks(instance, self.cfg)
      _StartInstanceDisks(self.cfg, instance, ignore_secondaries)
      if not rpc.call_instance_start(node_current, instance, extra_args):
        # roll back the disk activation before reporting the failure
        _ShutdownInstanceDisks(instance, self.cfg)
        raise errors.OpExecError("Could not start instance for full reboot")

    self.cfg.MarkInstanceUp(instance.name)
2240 bf6929a2 Alexander Schreiber
2241 bf6929a2 Alexander Schreiber
2242 a8083063 Iustin Pop
class LUShutdownInstance(LogicalUnit):
  """Shutdown an instance.

  """
  HPATH = "instance-stop"
  HTYPE = constants.HTYPE_INSTANCE
  # opcode attributes that must be present
  _OP_REQP = ["instance_name"]
  # this LU does its own fine-grained locking, no big ganeti lock needed
  REQ_BGL = False

  def ExpandNames(self):
    """Expand the instance name and declare the locks we need."""
    self._ExpandAndLockInstance()
    # the node locks are computed in DeclareLocks, once the instance
    # lock is held and its node list is known
    self.needed_locks[locking.LEVEL_NODE] = []
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE

  def DeclareLocks(self, level):
    """Acquire the node locks of the (already locked) instance."""
    if level == locking.LEVEL_NODE:
      self._LockInstancesNodes()

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and secondary nodes of the instance.

    """
    env = _BuildInstanceHookEnvByObject(self.instance)
    nl = ([self.sstore.GetMasterNode(), self.instance.primary_node] +
          list(self.instance.secondary_nodes))
    return env, nl, nl

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster.

    """
    self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
    assert self.instance is not None, \
      "Cannot retrieve locked instance %s" % self.op.instance_name

  def Exec(self, feedback_fn):
    """Shutdown the instance.

    """
    instance = self.instance
    node_current = instance.primary_node
    # record the new admin state first; the actual shutdown below is
    # best-effort (a failure is only logged)
    self.cfg.MarkInstanceDown(instance.name)
    if not rpc.call_instance_shutdown(node_current, instance):
      logger.Error("could not shutdown instance")

    # deactivate the disks regardless of the shutdown call's outcome
    _ShutdownInstanceDisks(instance, self.cfg)
2292 a8083063 Iustin Pop
2293 a8083063 Iustin Pop
2294 fe7b0351 Michael Hanselmann
class LUReinstallInstance(LogicalUnit):
  """Reinstall an instance.

  """
  HPATH = "instance-reinstall"
  HTYPE = constants.HTYPE_INSTANCE
  # required opcode attributes ("os_type" is optional, see CheckPrereq)
  _OP_REQP = ["instance_name"]
  # this LU does its own fine-grained locking, no big ganeti lock needed
  REQ_BGL = False

  def ExpandNames(self):
    """Expand the instance name and declare the locks we need."""
    self._ExpandAndLockInstance()
    # the node locks are computed in DeclareLocks, once the instance
    # lock is held and its node list is known
    self.needed_locks[locking.LEVEL_NODE] = []
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE

  def DeclareLocks(self, level):
    """Acquire the node locks of the (already locked) instance."""
    if level == locking.LEVEL_NODE:
      self._LockInstancesNodes()

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and secondary nodes of the instance.

    """
    env = _BuildInstanceHookEnvByObject(self.instance)
    nl = ([self.sstore.GetMasterNode(), self.instance.primary_node] +
          list(self.instance.secondary_nodes))
    return env, nl, nl

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster and is not running.

    """
    instance = self.cfg.GetInstanceInfo(self.op.instance_name)
    assert instance is not None, \
      "Cannot retrieve locked instance %s" % self.op.instance_name

    if instance.disk_template == constants.DT_DISKLESS:
      raise errors.OpPrereqError("Instance '%s' has no disks" %
                                 self.op.instance_name)
    if instance.status != "down":
      raise errors.OpPrereqError("Instance '%s' is marked to be up" %
                                 self.op.instance_name)
    # double-check with the node that the instance is really stopped
    remote_info = rpc.call_instance_info(instance.primary_node, instance.name)
    if remote_info:
      raise errors.OpPrereqError("Instance '%s' is running on the node %s" %
                                 (self.op.instance_name,
                                  instance.primary_node))

    self.op.os_type = getattr(self.op, "os_type", None)
    if self.op.os_type is not None:
      # OS verification
      pnode = self.cfg.GetNodeInfo(
        self.cfg.ExpandNodeName(instance.primary_node))
      if pnode is None:
        # FIX: the reinstall opcode has no "pnode" attribute, so the
        # previous "self.op.pnode" in this message would have raised
        # AttributeError instead of reporting the real problem
        raise errors.OpPrereqError("Primary node '%s' is unknown" %
                                   instance.primary_node)
      os_obj = rpc.call_os_get(pnode.name, self.op.os_type)
      if not os_obj:
        raise errors.OpPrereqError("OS '%s' not in supported OS list for"
                                   " primary node"  % self.op.os_type)

    self.instance = instance

  def Exec(self, feedback_fn):
    """Reinstall the instance.

    """
    inst = self.instance

    if self.op.os_type is not None:
      feedback_fn("Changing OS to '%s'..." % self.op.os_type)
      inst.os = self.op.os_type
      # write the new OS type back to the configuration
      self.cfg.AddInstance(inst)

    _StartInstanceDisks(self.cfg, inst, None)
    try:
      feedback_fn("Running the instance OS create scripts...")
      if not rpc.call_instance_os_add(inst.primary_node, inst, "sda", "sdb"):
        raise errors.OpExecError("Could not install OS for instance %s"
                                 " on node %s" %
                                 (inst.name, inst.primary_node))
    finally:
      # always deactivate the disks again, even if the OS install failed
      _ShutdownInstanceDisks(inst, self.cfg)
2380 fe7b0351 Michael Hanselmann
2381 fe7b0351 Michael Hanselmann
2382 decd5f45 Iustin Pop
class LURenameInstance(LogicalUnit):
  """Rename an instance.

  """
  HPATH = "instance-rename"
  HTYPE = constants.HTYPE_INSTANCE
  # required opcode attributes ("ignore_ip" is optional, see CheckPrereq)
  _OP_REQP = ["instance_name", "new_name"]

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and secondary nodes of the instance.

    """
    env = _BuildInstanceHookEnvByObject(self.instance)
    env["INSTANCE_NEW_NAME"] = self.op.new_name
    nl = ([self.sstore.GetMasterNode(), self.instance.primary_node] +
          list(self.instance.secondary_nodes))
    return env, nl, nl

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster and is not running,
    and that the new name resolves and is not already taken.

    """
    instance = self.cfg.GetInstanceInfo(
      self.cfg.ExpandInstanceName(self.op.instance_name))
    if instance is None:
      raise errors.OpPrereqError("Instance '%s' not known" %
                                 self.op.instance_name)
    if instance.status != "down":
      raise errors.OpPrereqError("Instance '%s' is marked to be up" %
                                 self.op.instance_name)
    # double-check with the node that the instance is really stopped
    remote_info = rpc.call_instance_info(instance.primary_node, instance.name)
    if remote_info:
      raise errors.OpPrereqError("Instance '%s' is running on the node %s" %
                                 (self.op.instance_name,
                                  instance.primary_node))
    self.instance = instance

    # new name verification (resolves the name, catching typos early)
    name_info = utils.HostInfo(self.op.new_name)

    self.op.new_name = new_name = name_info.name
    instance_list = self.cfg.GetInstanceList()
    if new_name in instance_list:
      raise errors.OpPrereqError("Instance '%s' is already in the cluster" %
                                 new_name)

    if not getattr(self.op, "ignore_ip", False):
      # make sure the IP the new name resolves to is not already in use
      if utils.TcpPing(name_info.ip, constants.DEFAULT_NODED_PORT):
        raise errors.OpPrereqError("IP %s of instance %s already in use" %
                                   (name_info.ip, new_name))


  def Exec(self, feedback_fn):
    """Rename the instance.

    """
    inst = self.instance
    old_name = inst.name

    # for file-based disks, remember the old storage directory before the
    # config rename changes the disk paths
    if inst.disk_template == constants.DT_FILE:
      old_file_storage_dir = os.path.dirname(inst.disks[0].logical_id[1])

    self.cfg.RenameInstance(inst.name, self.op.new_name)
    # Change the instance lock. This is definitely safe while we hold the BGL
    self.context.glm.remove(locking.LEVEL_INSTANCE, inst.name)
    self.context.glm.add(locking.LEVEL_INSTANCE, self.op.new_name)

    # re-read the instance from the configuration after rename
    inst = self.cfg.GetInstanceInfo(self.op.new_name)

    if inst.disk_template == constants.DT_FILE:
      new_file_storage_dir = os.path.dirname(inst.disks[0].logical_id[1])
      result = rpc.call_file_storage_dir_rename(inst.primary_node,
                                                old_file_storage_dir,
                                                new_file_storage_dir)

      if not result:
        raise errors.OpExecError("Could not connect to node '%s' to rename"
                                 " directory '%s' to '%s' (but the instance"
                                 " has been renamed in Ganeti)" % (
                                 inst.primary_node, old_file_storage_dir,
                                 new_file_storage_dir))

      if not result[0]:
        raise errors.OpExecError("Could not rename directory '%s' to '%s'"
                                 " (but the instance has been renamed in"
                                 " Ganeti)" % (old_file_storage_dir,
                                               new_file_storage_dir))

    _StartInstanceDisks(self.cfg, inst, None)
    try:
      # a failed rename script is only logged: the configuration rename
      # has already happened at this point
      if not rpc.call_instance_run_rename(inst.primary_node, inst, old_name,
                                          "sda", "sdb"):
        msg = ("Could not run OS rename script for instance %s on node %s"
               " (but the instance has been renamed in Ganeti)" %
               (inst.name, inst.primary_node))
        logger.Error(msg)
    finally:
      _ShutdownInstanceDisks(inst, self.cfg)
2485 decd5f45 Iustin Pop
2486 decd5f45 Iustin Pop
2487 a8083063 Iustin Pop
class LURemoveInstance(LogicalUnit):
  """Remove an instance.

  """
  HPATH = "instance-remove"
  HTYPE = constants.HTYPE_INSTANCE
  # opcode attributes that must be present
  _OP_REQP = ["instance_name", "ignore_failures"]
  # this LU does its own fine-grained locking, no big ganeti lock needed
  REQ_BGL = False

  def ExpandNames(self):
    """Expand the instance name and declare the locks we need."""
    self._ExpandAndLockInstance()
    # the node locks are computed in DeclareLocks, once the instance
    # lock is held and its node list is known
    self.needed_locks[locking.LEVEL_NODE] = []
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE

  def DeclareLocks(self, level):
    """Acquire the node locks of the (already locked) instance."""
    if level == locking.LEVEL_NODE:
      self._LockInstancesNodes()

  def BuildHooksEnv(self):
    """Build hooks env.

    The hooks for this LU run only on the master node.

    """
    env = _BuildInstanceHookEnvByObject(self.instance)
    nl = [self.sstore.GetMasterNode()]
    return env, nl, nl

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster.

    """
    self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
    assert self.instance is not None, \
      "Cannot retrieve locked instance %s" % self.op.instance_name

  def Exec(self, feedback_fn):
    """Remove the instance.

    """
    instance = self.instance
    logger.Info("shutting down instance %s on node %s" %
                (instance.name, instance.primary_node))

    # shutdown failures are fatal unless the opcode asks to ignore them
    if not rpc.call_instance_shutdown(instance.primary_node, instance):
      if self.op.ignore_failures:
        feedback_fn("Warning: can't shutdown instance")
      else:
        raise errors.OpExecError("Could not shutdown instance %s on node %s" %
                                 (instance.name, instance.primary_node))

    logger.Info("removing block devices for instance %s" % instance.name)

    # same policy for disk removal failures
    if not _RemoveDisks(instance, self.cfg):
      if self.op.ignore_failures:
        feedback_fn("Warning: can't remove instance's disks")
      else:
        raise errors.OpExecError("Can't remove instance's disks")

    logger.Info("removing instance %s out of cluster config" % instance.name)

    self.cfg.RemoveInstance(instance.name)
    # drop the now-stale instance lock together with the config entry
    self.remove_locks[locking.LEVEL_INSTANCE] = instance.name
2552 a8083063 Iustin Pop
2553 a8083063 Iustin Pop
2554 a8083063 Iustin Pop
class LUQueryInstances(NoHooksLU):
  """Logical unit for querying instances.

  Returns, for each selected instance, the values of the requested
  output fields.  Static fields are answered from the configuration;
  dynamic fields ("oper_state", "oper_ram", "status") additionally
  require live RPC queries to the primary nodes and therefore trigger
  locking.

  """
  _OP_REQP = ["output_fields", "names"]
  REQ_BGL = False

  def ExpandNames(self):
    """Compute the wanted instances and the locks needed.

    Locks are only acquired (shared) when dynamic fields were
    requested; purely static queries run lockless against the config.

    """
    self.dynamic_fields = frozenset(["oper_state", "oper_ram", "status"])
    self.static_fields = frozenset([
      "name", "os", "pnode", "snodes",
      "admin_state", "admin_ram",
      "disk_template", "ip", "mac", "bridge",
      "sda_size", "sdb_size", "vcpus", "tags",
      "network_port", "kernel_path", "initrd_path",
      "hvm_boot_order", "hvm_acpi", "hvm_pae",
      "hvm_cdrom_image_path", "hvm_nic_type",
      "hvm_disk_type", "vnc_bind_address",
      ])
    # reject any field that is neither static nor dynamic
    _CheckOutputFields(static=self.static_fields,
                       dynamic=self.dynamic_fields,
                       selected=self.op.output_fields)

    self.needed_locks = {}
    self.share_locks[locking.LEVEL_INSTANCE] = 1
    self.share_locks[locking.LEVEL_NODE] = 1

    if self.op.names:
      self.wanted = _GetWantedInstances(self, self.op.names)
    else:
      self.wanted = locking.ALL_SET

    # locking is only needed if a dynamic field was requested
    self.do_locking = not self.static_fields.issuperset(self.op.output_fields)
    if self.do_locking:
      self.needed_locks[locking.LEVEL_INSTANCE] = self.wanted
      self.needed_locks[locking.LEVEL_NODE] = []
      self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE

  def DeclareLocks(self, level):
    # node locks are derived from the acquired instance locks
    if level == locking.LEVEL_NODE and self.do_locking:
      self._LockInstancesNodes()

  def CheckPrereq(self):
    """Check prerequisites.

    No prerequisites: field validation happened in ExpandNames.

    """
    pass

  def Exec(self, feedback_fn):
    """Computes the list of nodes and their attributes.

    Returns a list (one entry per instance) of lists of field values,
    in the order of self.op.output_fields.

    """
    all_info = self.cfg.GetAllInstancesInfo()
    if self.do_locking:
      # only report on the instances we actually locked
      instance_names = self.acquired_locks[locking.LEVEL_INSTANCE]
    else:
      instance_names = all_info.keys()
    instance_list = [all_info[iname] for iname in instance_names]

    # begin data gathering

    nodes = frozenset([inst.primary_node for inst in instance_list])

    bad_nodes = []
    if self.dynamic_fields.intersection(self.op.output_fields):
      # live data requested: query every involved primary node
      live_data = {}
      node_data = rpc.call_all_instances_info(nodes)
      for name in nodes:
        result = node_data[name]
        if result:
          live_data.update(result)
        elif result == False:
          # an explicit False means the RPC to this node failed
          bad_nodes.append(name)
        # else no instance is alive
    else:
      live_data = dict([(name, {}) for name in instance_names])

    # end data gathering

    output = []
    for instance in instance_list:
      iout = []
      for field in self.op.output_fields:
        if field == "name":
          val = instance.name
        elif field == "os":
          val = instance.os
        elif field == "pnode":
          val = instance.primary_node
        elif field == "snodes":
          val = list(instance.secondary_nodes)
        elif field == "admin_state":
          val = (instance.status != "down")
        elif field == "oper_state":
          if instance.primary_node in bad_nodes:
            # can't tell: the primary node did not answer
            val = None
          else:
            val = bool(live_data.get(instance.name))
        elif field == "status":
          # combined admin state + live state summary
          if instance.primary_node in bad_nodes:
            val = "ERROR_nodedown"
          else:
            running = bool(live_data.get(instance.name))
            if running:
              if instance.status != "down":
                val = "running"
              else:
                val = "ERROR_up"
            else:
              if instance.status != "down":
                val = "ERROR_down"
              else:
                val = "ADMIN_down"
        elif field == "admin_ram":
          val = instance.memory
        elif field == "oper_ram":
          if instance.primary_node in bad_nodes:
            val = None
          elif instance.name in live_data:
            val = live_data[instance.name].get("memory", "?")
          else:
            val = "-"
        elif field == "disk_template":
          val = instance.disk_template
        elif field == "ip":
          val = instance.nics[0].ip
        elif field == "bridge":
          val = instance.nics[0].bridge
        elif field == "mac":
          val = instance.nics[0].mac
        elif field == "sda_size" or field == "sdb_size":
          # field[:3] is "sda"/"sdb", the iv_name of the disk
          disk = instance.FindDisk(field[:3])
          if disk is None:
            val = None
          else:
            val = disk.size
        elif field == "vcpus":
          val = instance.vcpus
        elif field == "tags":
          val = list(instance.GetTags())
        elif field in ("network_port", "kernel_path", "initrd_path",
                       "hvm_boot_order", "hvm_acpi", "hvm_pae",
                       "hvm_cdrom_image_path", "hvm_nic_type",
                       "hvm_disk_type", "vnc_bind_address"):
          # optional per-instance attributes; missing values are shown
          # as "default" for the overridable ones, "-" otherwise
          val = getattr(instance, field, None)
          if val is not None:
            pass
          elif field in ("hvm_nic_type", "hvm_disk_type",
                         "kernel_path", "initrd_path"):
            val = "default"
          else:
            val = "-"
        else:
          raise errors.ParameterError(field)
        iout.append(val)
      output.append(iout)

    return output
2712 a8083063 Iustin Pop
2713 a8083063 Iustin Pop
2714 a8083063 Iustin Pop
class LUFailoverInstance(LogicalUnit):
  """Failover an instance.

  Moves an instance from its primary node to its (single) secondary
  node by shutting it down on the primary and starting it on the
  secondary.  Only works for network-mirrored disk templates.

  """
  HPATH = "instance-failover"
  HTYPE = constants.HTYPE_INSTANCE
  _OP_REQP = ["instance_name", "ignore_consistency"]
  REQ_BGL = False

  def ExpandNames(self):
    self._ExpandAndLockInstance()
    # node locks are computed later from the locked instance
    self.needed_locks[locking.LEVEL_NODE] = []
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE

  def DeclareLocks(self, level):
    if level == locking.LEVEL_NODE:
      self._LockInstancesNodes()

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and secondary nodes of the instance.

    """
    env = {
      "IGNORE_CONSISTENCY": self.op.ignore_consistency,
      }
    env.update(_BuildInstanceHookEnvByObject(self.instance))
    nl = [self.sstore.GetMasterNode()] + list(self.instance.secondary_nodes)
    return env, nl, nl

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster, uses a mirrored
    disk template, has a secondary node, and that the target node has
    enough memory and the needed bridges.

    """
    self.instance = instance = self.cfg.GetInstanceInfo(self.op.instance_name)
    assert self.instance is not None, \
      "Cannot retrieve locked instance %s" % self.op.instance_name

    if instance.disk_template not in constants.DTS_NET_MIRROR:
      raise errors.OpPrereqError("Instance's disk layout is not"
                                 " network mirrored, cannot failover.")

    secondary_nodes = instance.secondary_nodes
    if not secondary_nodes:
      raise errors.ProgrammerError("no secondary node but using "
                                   "a mirrored disk template")

    target_node = secondary_nodes[0]
    # check memory requirements on the secondary node
    _CheckNodeFreeMemory(self.cfg, target_node, "failing over instance %s" %
                         instance.name, instance.memory)

    # check bridge existance
    brlist = [nic.bridge for nic in instance.nics]
    if not rpc.call_bridges_exist(target_node, brlist):
      raise errors.OpPrereqError("One or more target bridges %s does not"
                                 " exist on destination node '%s'" %
                                 (brlist, target_node))

  def Exec(self, feedback_fn):
    """Failover an instance.

    The failover is done by shutting it down on its present node and
    starting it on the secondary.

    """
    instance = self.instance

    source_node = instance.primary_node
    target_node = instance.secondary_nodes[0]

    # refuse to fail over onto degraded disks unless forced (or the
    # instance is administratively down anyway)
    feedback_fn("* checking disk consistency between source and target")
    for dev in instance.disks:
      # for drbd, these are drbd over lvm
      if not _CheckDiskConsistency(self.cfg, dev, target_node, False):
        if instance.status == "up" and not self.op.ignore_consistency:
          raise errors.OpExecError("Disk %s is degraded on target node,"
                                   " aborting failover." % dev.iv_name)

    feedback_fn("* shutting down instance on source node")
    logger.Info("Shutting down instance %s on node %s" %
                (instance.name, source_node))

    if not rpc.call_instance_shutdown(source_node, instance):
      if self.op.ignore_consistency:
        # best-effort: the source node may be dead, which is exactly
        # the case failover exists for
        logger.Error("Could not shutdown instance %s on node %s. Proceeding"
                     " anyway. Please make sure node %s is down"  %
                     (instance.name, source_node, source_node))
      else:
        raise errors.OpExecError("Could not shutdown instance %s on node %s" %
                                 (instance.name, source_node))

    feedback_fn("* deactivating the instance's disks on source node")
    if not _ShutdownInstanceDisks(instance, self.cfg, ignore_primary=True):
      raise errors.OpExecError("Can't shut down the instance's disks.")

    # switch the primary and persist it cluster-wide
    instance.primary_node = target_node
    # distribute new instance config to the other nodes
    self.cfg.Update(instance)

    # Only start the instance if it's marked as up
    if instance.status == "up":
      feedback_fn("* activating the instance's disks on target node")
      logger.Info("Starting instance %s on node %s" %
                  (instance.name, target_node))

      disks_ok, dummy = _AssembleInstanceDisks(instance, self.cfg,
                                               ignore_secondaries=True)
      if not disks_ok:
        # roll back disk activation before failing
        _ShutdownInstanceDisks(instance, self.cfg)
        raise errors.OpExecError("Can't activate the instance's disks")

      feedback_fn("* starting the instance on the target node")
      if not rpc.call_instance_start(target_node, instance, None):
        _ShutdownInstanceDisks(instance, self.cfg)
        raise errors.OpExecError("Could not start instance %s on node %s." %
                                 (instance.name, target_node))
2834 a8083063 Iustin Pop
2835 a8083063 Iustin Pop
2836 3f78eef2 Iustin Pop
def _CreateBlockDevOnPrimary(cfg, node, instance, device, info):
  """Create a tree of block devices on the primary node.

  Recursively creates every child device first, then the device
  itself, via the blockdev_create RPC.  Returns True on success,
  False as soon as any creation fails.

  """
  # bottom-up: all children must exist before the parent is assembled
  for child in (device.children or []):
    if not _CreateBlockDevOnPrimary(cfg, node, instance, child, info):
      return False

  cfg.SetDiskID(device, node)
  created_id = rpc.call_blockdev_create(node, device, device.size,
                                        instance.name, True, info)
  if not created_id:
    return False
  # remember the on-node id, but never overwrite an existing one
  if device.physical_id is None:
    device.physical_id = created_id
  return True
2855 a8083063 Iustin Pop
2856 a8083063 Iustin Pop
2857 3f78eef2 Iustin Pop
def _CreateBlockDevOnSecondary(cfg, node, instance, device, force, info):
  """Create a tree of block devices on a secondary node.

  If this device type has to be created on secondaries, create it and
  all its children.

  If not, just recurse to children keeping the same 'force' value.

  """
  if device.CreateOnSecondary():
    force = True

  # children first, carrying the (possibly upgraded) force flag down
  for child in (device.children or []):
    if not _CreateBlockDevOnSecondary(cfg, node, instance,
                                      child, force, info):
      return False

  if not force:
    # nothing to create at this level on a secondary
    return True

  cfg.SetDiskID(device, node)
  created_id = rpc.call_blockdev_create(node, device, device.size,
                                        instance.name, False, info)
  if not created_id:
    return False
  # keep the first physical id ever assigned to this device
  if device.physical_id is None:
    device.physical_id = created_id
  return True
2884 a8083063 Iustin Pop
2885 a8083063 Iustin Pop
2886 923b1523 Iustin Pop
def _GenerateUniqueNames(cfg, exts):
2887 923b1523 Iustin Pop
  """Generate a suitable LV name.
2888 923b1523 Iustin Pop

2889 923b1523 Iustin Pop
  This will generate a logical volume name for the given instance.
2890 923b1523 Iustin Pop

2891 923b1523 Iustin Pop
  """
2892 923b1523 Iustin Pop
  results = []
2893 923b1523 Iustin Pop
  for val in exts:
2894 923b1523 Iustin Pop
    new_id = cfg.GenerateUniqueID()
2895 923b1523 Iustin Pop
    results.append("%s%s" % (new_id, val))
2896 923b1523 Iustin Pop
  return results
2897 923b1523 Iustin Pop
2898 923b1523 Iustin Pop
2899 a1f445d3 Iustin Pop
def _GenerateDRBD8Branch(cfg, primary, secondary, size, names, iv_name):
  """Generate a drbd8 device complete with its children.

  Builds a DRBD8 Disk object mirroring between the primary and
  secondary nodes, backed by two new LVs in the cluster volume group:
  names[0] for the data (of the requested size) and names[1] for the
  fixed 128MB metadata volume.  A fresh DRBD port is allocated from
  the configuration.

  """
  port = cfg.AllocatePort()
  vgname = cfg.GetVGName()
  dev_data = objects.Disk(dev_type=constants.LD_LV, size=size,
                          logical_id=(vgname, names[0]))
  # 128 MB metadata volume, as required by drbd8
  dev_meta = objects.Disk(dev_type=constants.LD_LV, size=128,
                          logical_id=(vgname, names[1]))
  drbd_dev = objects.Disk(dev_type=constants.LD_DRBD8, size=size,
                          logical_id = (primary, secondary, port),
                          children = [dev_data, dev_meta],
                          iv_name=iv_name)
  return drbd_dev
2914 a1f445d3 Iustin Pop
2915 7c0d6283 Michael Hanselmann
2916 923b1523 Iustin Pop
def _GenerateDiskTemplate(cfg, template_name,
                          instance_name, primary_node,
                          secondary_nodes, disk_sz, swap_sz,
                          file_storage_dir, file_driver):
  """Generate the entire disk layout for a given template type.

  Returns the list of Disk objects (sda/sdb pair, or empty for
  diskless) for the requested template.  Raises ProgrammerError if the
  number of secondary nodes does not match the template (0 for plain
  and file, exactly 1 for drbd8) or the template is unknown.

  """
  #TODO: compute space requirements

  vgname = cfg.GetVGName()
  if template_name == constants.DT_DISKLESS:
    disks = []
  elif template_name == constants.DT_PLAIN:
    if len(secondary_nodes) != 0:
      raise errors.ProgrammerError("Wrong template configuration")

    # two plain LVs in the cluster volume group
    names = _GenerateUniqueNames(cfg, [".sda", ".sdb"])
    sda_dev = objects.Disk(dev_type=constants.LD_LV, size=disk_sz,
                           logical_id=(vgname, names[0]),
                           iv_name = "sda")
    sdb_dev = objects.Disk(dev_type=constants.LD_LV, size=swap_sz,
                           logical_id=(vgname, names[1]),
                           iv_name = "sdb")
    disks = [sda_dev, sdb_dev]
  elif template_name == constants.DT_DRBD8:
    if len(secondary_nodes) != 1:
      raise errors.ProgrammerError("Wrong template configuration")
    remote_node = secondary_nodes[0]
    # each drbd device needs a data and a metadata LV
    names = _GenerateUniqueNames(cfg, [".sda_data", ".sda_meta",
                                       ".sdb_data", ".sdb_meta"])
    drbd_sda_dev = _GenerateDRBD8Branch(cfg, primary_node, remote_node,
                                         disk_sz, names[0:2], "sda")
    drbd_sdb_dev = _GenerateDRBD8Branch(cfg, primary_node, remote_node,
                                         swap_sz, names[2:4], "sdb")
    disks = [drbd_sda_dev, drbd_sdb_dev]
  elif template_name == constants.DT_FILE:
    if len(secondary_nodes) != 0:
      raise errors.ProgrammerError("Wrong template configuration")

    # file-backed disks live under file_storage_dir, accessed via the
    # given file driver
    file_sda_dev = objects.Disk(dev_type=constants.LD_FILE, size=disk_sz,
                                iv_name="sda", logical_id=(file_driver,
                                "%s/sda" % file_storage_dir))
    file_sdb_dev = objects.Disk(dev_type=constants.LD_FILE, size=swap_sz,
                                iv_name="sdb", logical_id=(file_driver,
                                "%s/sdb" % file_storage_dir))
    disks = [file_sda_dev, file_sdb_dev]
  else:
    raise errors.ProgrammerError("Invalid disk template '%s'" % template_name)
  return disks
2965 a8083063 Iustin Pop
2966 a8083063 Iustin Pop
2967 a0c3fea1 Michael Hanselmann
def _GetInstanceInfoText(instance):
2968 3ecf6786 Iustin Pop
  """Compute that text that should be added to the disk's metadata.
2969 3ecf6786 Iustin Pop

2970 3ecf6786 Iustin Pop
  """
2971 a0c3fea1 Michael Hanselmann
  return "originstname+%s" % instance.name
2972 a0c3fea1 Michael Hanselmann
2973 a0c3fea1 Michael Hanselmann
2974 a8083063 Iustin Pop
def _CreateDisks(cfg, instance):
  """Create all disks for an instance.

  This abstracts away some work from AddInstance.

  For file-based templates the storage directory is created on the
  primary node first; then every disk is created on all secondary
  nodes and finally on the primary node.

  Args:
    instance: the instance object

  Returns:
    True or False showing the success of the creation process

  """
  info = _GetInstanceInfoText(instance)

  if instance.disk_template == constants.DT_FILE:
    # logical_id is (file_driver, path); the directory holds all disks
    file_storage_dir = os.path.dirname(instance.disks[0].logical_id[1])
    result = rpc.call_file_storage_dir_create(instance.primary_node,
                                              file_storage_dir)

    # a falsy result means the RPC itself failed
    if not result:
      logger.Error("Could not connect to node '%s'" % instance.primary_node)
      return False

    # result[0] is the remote success flag
    if not result[0]:
      logger.Error("failed to create directory '%s'" % file_storage_dir)
      return False

  for device in instance.disks:
    logger.Info("creating volume %s for instance %s" %
                (device.iv_name, instance.name))
    # secondaries first, so the primary (which may need them, e.g. for
    # drbd) is created last
    #HARDCODE
    for secondary_node in instance.secondary_nodes:
      if not _CreateBlockDevOnSecondary(cfg, secondary_node, instance,
                                        device, False, info):
        logger.Error("failed to create volume %s (%s) on secondary node %s!" %
                     (device.iv_name, device, secondary_node))
        return False
    #HARDCODE
    if not _CreateBlockDevOnPrimary(cfg, instance.primary_node,
                                    instance, device, info):
      logger.Error("failed to create volume %s on primary!" %
                   device.iv_name)
      return False

  return True
3019 a8083063 Iustin Pop
3020 a8083063 Iustin Pop
3021 a8083063 Iustin Pop
def _RemoveDisks(instance, cfg):
  """Remove all disks for an instance.

  This abstracts away some work from `AddInstance()` and
  `RemoveInstance()`. Note that in case some of the devices couldn't
  be removed, the removal will continue with the other ones (compare
  with `_CreateDisks()`).

  Args:
    instance: the instance object

  Returns:
    True or False showing the success of the removal process

  """
  logger.Info("removing block devices for instance %s" % instance.name)

  # best-effort: every failure is logged but removal continues, and
  # the overall result only reflects whether everything succeeded
  result = True
  for device in instance.disks:
    for node, disk in device.ComputeNodeTree(instance.primary_node):
      cfg.SetDiskID(disk, node)
      if not rpc.call_blockdev_remove(node, disk):
        logger.Error("could not remove block device %s on node %s,"
                     " continuing anyway" %
                     (device.iv_name, node))
        result = False

  if instance.disk_template == constants.DT_FILE:
    # also clean up the storage directory of file-based instances
    file_storage_dir = os.path.dirname(instance.disks[0].logical_id[1])
    if not rpc.call_file_storage_dir_remove(instance.primary_node,
                                            file_storage_dir):
      logger.Error("could not remove directory '%s'" % file_storage_dir)
      result = False

  return result
3056 a8083063 Iustin Pop
3057 a8083063 Iustin Pop
3058 e2fe6369 Iustin Pop
def _ComputeDiskSize(disk_template, disk_size, swap_size):
  """Compute disk size requirements in the volume group

  This is currently hard-coded for the two-drive layout.

  """
  # Required free disk space as a function of disk and swap space;
  # None means the template needs no volume-group space at all.
  size_by_template = {
    constants.DT_DISKLESS: None,
    constants.DT_PLAIN: disk_size + swap_size,
    # 256 MB are added for drbd metadata, 128MB for each drbd device
    constants.DT_DRBD8: disk_size + swap_size + 256,
    constants.DT_FILE: None,
  }

  try:
    return size_by_template[disk_template]
  except KeyError:
    raise errors.ProgrammerError("Disk template '%s' size requirement"
                                 " is unknown" %  disk_template)
3078 e2fe6369 Iustin Pop
3079 e2fe6369 Iustin Pop
3080 a8083063 Iustin Pop
class LUCreateInstance(LogicalUnit):
  """Create an instance.

  Handles both fresh creation (INSTANCE_CREATE) and import from a
  previously exported instance (INSTANCE_IMPORT).

  """
  HPATH = "instance-add"
  HTYPE = constants.HTYPE_INSTANCE
  _OP_REQP = ["instance_name", "mem_size", "disk_size",
              "disk_template", "swap_size", "mode", "start", "vcpus",
              "wait_for_sync", "ip_check", "mac"]
  REQ_BGL = False

  def _ExpandNode(self, node):
    """Expands and checks one node name.

    """
    node_full = self.cfg.ExpandNodeName(node)
    if node_full is None:
      raise errors.OpPrereqError("Unknown node %s" % node)
    return node_full

  def ExpandNames(self):
    """ExpandNames for CreateInstance.

    Figure out the right locks for instance creation.

    """
    self.needed_locks = {}

    # set optional parameters to none if they don't exist
    for attr in ["kernel_path", "initrd_path", "pnode", "snode",
                 "iallocator", "hvm_boot_order", "hvm_acpi", "hvm_pae",
                 "hvm_cdrom_image_path", "hvm_nic_type", "hvm_disk_type",
                 "vnc_bind_address"]:
      if not hasattr(self.op, attr):
        setattr(self.op, attr, None)

    # verify creation mode
    if self.op.mode not in (constants.INSTANCE_CREATE,
                            constants.INSTANCE_IMPORT):
      raise errors.OpPrereqError("Invalid instance creation mode '%s'" %
                                 self.op.mode)
    # disk template and mirror node verification
    if self.op.disk_template not in constants.DISK_TEMPLATES:
      raise errors.OpPrereqError("Invalid disk template name")

    #### instance parameters check

    # instance name verification
    hostname1 = utils.HostInfo(self.op.instance_name)
    self.op.instance_name = instance_name = hostname1.name

    # this is just a preventive check, but someone might still add this
    # instance in the meantime, and creation will fail at lock-add time
    if instance_name in self.cfg.GetInstanceList():
      raise errors.OpPrereqError("Instance '%s' is already in the cluster" %
                                 instance_name)

    self.add_locks[locking.LEVEL_INSTANCE] = instance_name

    # ip validity checks
    ip = getattr(self.op, "ip", None)
    if ip is None or ip.lower() == "none":
      inst_ip = None
    elif ip.lower() == "auto":
      inst_ip = hostname1.ip
    else:
      if not utils.IsValidIP(ip):
        raise errors.OpPrereqError("given IP address '%s' doesn't look"
                                   " like a valid IP" % ip)
      inst_ip = ip
    self.inst_ip = self.op.ip = inst_ip
    # used in CheckPrereq for ip ping check
    self.check_ip = hostname1.ip

    # MAC address verification
    if self.op.mac != "auto":
      if not utils.IsValidMac(self.op.mac.lower()):
        raise errors.OpPrereqError("invalid MAC address specified: %s" %
                                   self.op.mac)

    # boot order verification
    if self.op.hvm_boot_order is not None:
      if len(self.op.hvm_boot_order.strip("acdn")) != 0:
        raise errors.OpPrereqError("invalid boot order specified,"
                                   " must be one or more of [acdn]")
    # file storage checks
    if (self.op.file_driver and
        not self.op.file_driver in constants.FILE_DRIVER):
      raise errors.OpPrereqError("Invalid file driver name '%s'" %
                                 self.op.file_driver)

    if self.op.file_storage_dir and os.path.isabs(self.op.file_storage_dir):
      raise errors.OpPrereqError("File storage directory path not absolute")

    ### Node/iallocator related checks
    # exactly one of iallocator/pnode must be given
    if [self.op.iallocator, self.op.pnode].count(None) != 1:
      raise errors.OpPrereqError("One and only one of iallocator and primary"
                                 " node must be given")

    if self.op.iallocator:
      # the allocator may choose any node, so lock them all
      self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
    else:
      self.op.pnode = self._ExpandNode(self.op.pnode)
      nodelist = [self.op.pnode]
      if self.op.snode is not None:
        self.op.snode = self._ExpandNode(self.op.snode)
        nodelist.append(self.op.snode)
      self.needed_locks[locking.LEVEL_NODE] = nodelist

    # in case of import lock the source node too
    if self.op.mode == constants.INSTANCE_IMPORT:
      src_node = getattr(self.op, "src_node", None)
      src_path = getattr(self.op, "src_path", None)

      if src_node is None or src_path is None:
        raise errors.OpPrereqError("Importing an instance requires source"
                                   " node and path options")

      if not os.path.isabs(src_path):
        raise errors.OpPrereqError("The source path must be absolute")

      self.op.src_node = src_node = self._ExpandNode(src_node)
      if self.needed_locks[locking.LEVEL_NODE] is not locking.ALL_SET:
        self.needed_locks[locking.LEVEL_NODE].append(src_node)

    else: # INSTANCE_CREATE
      if getattr(self.op, "os_type", None) is None:
        raise errors.OpPrereqError("No guest OS specified")

  def _RunAllocator(self):
    """Run the allocator based on input opcode.

    On success this sets self.op.pnode (and self.op.snode when the
    allocator returned two nodes).

    """
    disks = [{"size": self.op.disk_size, "mode": "w"},
             {"size": self.op.swap_size, "mode": "w"}]
    nics = [{"mac": self.op.mac, "ip": getattr(self.op, "ip", None),
             "bridge": self.op.bridge}]
    ial = IAllocator(self.cfg, self.sstore,
                     mode=constants.IALLOCATOR_MODE_ALLOC,
                     name=self.op.instance_name,
                     disk_template=self.op.disk_template,
                     tags=[],
                     os=self.op.os_type,
                     vcpus=self.op.vcpus,
                     mem_size=self.op.mem_size,
                     disks=disks,
                     nics=nics,
                     )

    ial.Run(self.op.iallocator)

    if not ial.success:
      raise errors.OpPrereqError("Can't compute nodes using"
                                 " iallocator '%s': %s" % (self.op.iallocator,
                                                           ial.info))
    if len(ial.nodes) != ial.required_nodes:
      # FIX: the format string has three conversions; the original code
      # passed only two arguments, raising TypeError instead of the
      # intended OpPrereqError
      raise errors.OpPrereqError("iallocator '%s' returned invalid number"
                                 " of nodes (%s), required %s" %
                                 (self.op.iallocator, len(ial.nodes),
                                  ial.required_nodes))
    self.op.pnode = ial.nodes[0]
    logger.ToStdout("Selected nodes for the instance: %s" %
                    (", ".join(ial.nodes),))
    logger.Info("Selected nodes for instance %s via iallocator %s: %s" %
                (self.op.instance_name, self.op.iallocator, ial.nodes))
    if ial.required_nodes == 2:
      self.op.snode = ial.nodes[1]

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and secondary nodes of the instance.

    """
    env = {
      "INSTANCE_DISK_TEMPLATE": self.op.disk_template,
      "INSTANCE_DISK_SIZE": self.op.disk_size,
      "INSTANCE_SWAP_SIZE": self.op.swap_size,
      "INSTANCE_ADD_MODE": self.op.mode,
      }
    if self.op.mode == constants.INSTANCE_IMPORT:
      env["INSTANCE_SRC_NODE"] = self.op.src_node
      env["INSTANCE_SRC_PATH"] = self.op.src_path
      env["INSTANCE_SRC_IMAGE"] = self.src_image

    env.update(_BuildInstanceHookEnv(name=self.op.instance_name,
      primary_node=self.op.pnode,
      secondary_nodes=self.secondaries,
      status=self.instance_status,
      os_type=self.op.os_type,
      memory=self.op.mem_size,
      vcpus=self.op.vcpus,
      nics=[(self.inst_ip, self.op.bridge, self.op.mac)],
    ))

    nl = ([self.sstore.GetMasterNode(), self.op.pnode] +
          self.secondaries)
    return env, nl, nl

  def CheckPrereq(self):
    """Check prerequisites.

    Validates the import source (if any), IP availability, node and
    disk space requirements, OS availability and hypervisor-specific
    parameters; runs the iallocator when one was requested.

    """
    if (not self.cfg.GetVGName() and
        self.op.disk_template not in constants.DTS_NOT_LVM):
      raise errors.OpPrereqError("Cluster does not support lvm-based"
                                 " instances")

    if self.op.mode == constants.INSTANCE_IMPORT:
      src_node = self.op.src_node
      src_path = self.op.src_path

      export_info = rpc.call_export_info(src_node, src_path)

      if not export_info:
        raise errors.OpPrereqError("No export found in dir %s" % src_path)

      if not export_info.has_section(constants.INISECT_EXP):
        raise errors.ProgrammerError("Corrupted export config")

      ei_version = export_info.get(constants.INISECT_EXP, 'version')
      if (int(ei_version) != constants.EXPORT_VERSION):
        raise errors.OpPrereqError("Wrong export version %s (wanted %d)" %
                                   (ei_version, constants.EXPORT_VERSION))

      if int(export_info.get(constants.INISECT_INS, 'disk_count')) > 1:
        raise errors.OpPrereqError("Can't import instance with more than"
                                   " one data disk")

      # FIXME: are the old os-es, disk sizes, etc. useful?
      self.op.os_type = export_info.get(constants.INISECT_EXP, 'os')
      diskimage = os.path.join(src_path, export_info.get(constants.INISECT_INS,
                                                         'disk0_dump'))
      self.src_image = diskimage

    # ip ping checks (we use the same ip that was resolved in ExpandNames)

    if self.op.start and not self.op.ip_check:
      raise errors.OpPrereqError("Cannot ignore IP address conflicts when"
                                 " adding an instance in start mode")

    if self.op.ip_check:
      if utils.TcpPing(self.check_ip, constants.DEFAULT_NODED_PORT):
        # FIX: the original code referenced the bare name instance_name,
        # which is only a local variable of ExpandNames, so this path
        # raised NameError instead of the intended OpPrereqError
        raise errors.OpPrereqError("IP %s of instance %s already in use" %
                                   (self.check_ip, self.op.instance_name))

    # bridge verification
    bridge = getattr(self.op, "bridge", None)
    if bridge is None:
      self.op.bridge = self.cfg.GetDefBridge()
    else:
      self.op.bridge = bridge

    #### allocator run

    if self.op.iallocator is not None:
      self._RunAllocator()

    #### node related checks

    # check primary node
    self.pnode = pnode = self.cfg.GetNodeInfo(self.op.pnode)
    assert self.pnode is not None, \
      "Cannot retrieve locked node %s" % self.op.pnode
    self.secondaries = []

    # mirror node verification
    if self.op.disk_template in constants.DTS_NET_MIRROR:
      if self.op.snode is None:
        raise errors.OpPrereqError("The networked disk templates need"
                                   " a mirror node")
      if self.op.snode == pnode.name:
        raise errors.OpPrereqError("The secondary node cannot be"
                                   " the primary node.")
      self.secondaries.append(self.op.snode)

    req_size = _ComputeDiskSize(self.op.disk_template,
                                self.op.disk_size, self.op.swap_size)

    # Check lv size requirements (req_size is None for templates that
    # don't consume volume-group space)
    if req_size is not None:
      nodenames = [pnode.name] + self.secondaries
      nodeinfo = rpc.call_node_info(nodenames, self.cfg.GetVGName())
      for node in nodenames:
        info = nodeinfo.get(node, None)
        if not info:
          raise errors.OpPrereqError("Cannot get current information"
                                     " from node '%s'" % node)
        vg_free = info.get('vg_free', None)
        if not isinstance(vg_free, int):
          raise errors.OpPrereqError("Can't compute free disk space on"
                                     " node %s" % node)
        if req_size > info['vg_free']:
          raise errors.OpPrereqError("Not enough disk space on target node %s."
                                     " %d MB available, %d MB required" %
                                     (node, info['vg_free'], req_size))

    # os verification
    os_obj = rpc.call_os_get(pnode.name, self.op.os_type)
    if not os_obj:
      raise errors.OpPrereqError("OS '%s' not in supported os list for"
                                 " primary node"  % self.op.os_type)

    if self.op.kernel_path == constants.VALUE_NONE:
      raise errors.OpPrereqError("Can't set instance kernel to none")

    # bridge check on primary node
    if not rpc.call_bridges_exist(self.pnode.name, [self.op.bridge]):
      raise errors.OpPrereqError("target bridge '%s' does not exist on"
                                 " destination node '%s'" %
                                 (self.op.bridge, pnode.name))

    # memory check on primary node
    if self.op.start:
      _CheckNodeFreeMemory(self.cfg, self.pnode.name,
                           "creating instance %s" % self.op.instance_name,
                           self.op.mem_size)

    # hvm_cdrom_image_path verification
    if self.op.hvm_cdrom_image_path is not None:
      # FIXME (als): shouldn't these checks happen on the destination node?
      if not os.path.isabs(self.op.hvm_cdrom_image_path):
        raise errors.OpPrereqError("The path to the HVM CDROM image must"
                                   " be an absolute path or None, not %s" %
                                   self.op.hvm_cdrom_image_path)
      if not os.path.isfile(self.op.hvm_cdrom_image_path):
        raise errors.OpPrereqError("The HVM CDROM image must either be a"
                                   " regular file or a symlink pointing to"
                                   " an existing regular file, not %s" %
                                   self.op.hvm_cdrom_image_path)

    # vnc_bind_address verification
    if self.op.vnc_bind_address is not None:
      if not utils.IsValidIP(self.op.vnc_bind_address):
        raise errors.OpPrereqError("given VNC bind address '%s' doesn't look"
                                   " like a valid IP address" %
                                   self.op.vnc_bind_address)

    # Xen HVM device type checks
    if self.sstore.GetHypervisorType() == constants.HT_XEN_HVM31:
      if self.op.hvm_nic_type not in constants.HT_HVM_VALID_NIC_TYPES:
        raise errors.OpPrereqError("Invalid NIC type %s specified for Xen HVM"
                                   " hypervisor" % self.op.hvm_nic_type)
      if self.op.hvm_disk_type not in constants.HT_HVM_VALID_DISK_TYPES:
        raise errors.OpPrereqError("Invalid disk type %s specified for Xen HVM"
                                   " hypervisor" % self.op.hvm_disk_type)

    if self.op.start:
      self.instance_status = 'up'
    else:
      self.instance_status = 'down'

  def Exec(self, feedback_fn):
    """Create and add the instance to the cluster.

    """
    instance = self.op.instance_name
    pnode_name = self.pnode.name

    if self.op.mac == "auto":
      mac_address = self.cfg.GenerateMAC()
    else:
      mac_address = self.op.mac

    nic = objects.NIC(bridge=self.op.bridge, mac=mac_address)
    if self.inst_ip is not None:
      nic.ip = self.inst_ip

    ht_kind = self.sstore.GetHypervisorType()
    if ht_kind in constants.HTS_REQ_PORT:
      network_port = self.cfg.AllocatePort()
    else:
      network_port = None

    if self.op.vnc_bind_address is None:
      self.op.vnc_bind_address = constants.VNC_DEFAULT_BIND_ADDRESS

    # this is needed because os.path.join does not accept None arguments
    if self.op.file_storage_dir is None:
      string_file_storage_dir = ""
    else:
      string_file_storage_dir = self.op.file_storage_dir

    # build the full file storage dir path
    file_storage_dir = os.path.normpath(os.path.join(
                                        self.sstore.GetFileStorageDir(),
                                        string_file_storage_dir, instance))

    disks = _GenerateDiskTemplate(self.cfg,
                                  self.op.disk_template,
                                  instance, pnode_name,
                                  self.secondaries, self.op.disk_size,
                                  self.op.swap_size,
                                  file_storage_dir,
                                  self.op.file_driver)

    iobj = objects.Instance(name=instance, os=self.op.os_type,
                            primary_node=pnode_name,
                            memory=self.op.mem_size,
                            vcpus=self.op.vcpus,
                            nics=[nic], disks=disks,
                            disk_template=self.op.disk_template,
                            status=self.instance_status,
                            network_port=network_port,
                            kernel_path=self.op.kernel_path,
                            initrd_path=self.op.initrd_path,
                            hvm_boot_order=self.op.hvm_boot_order,
                            hvm_acpi=self.op.hvm_acpi,
                            hvm_pae=self.op.hvm_pae,
                            hvm_cdrom_image_path=self.op.hvm_cdrom_image_path,
                            vnc_bind_address=self.op.vnc_bind_address,
                            hvm_nic_type=self.op.hvm_nic_type,
                            hvm_disk_type=self.op.hvm_disk_type,
                            )

    feedback_fn("* creating instance disks...")
    if not _CreateDisks(self.cfg, iobj):
      # roll back any disks that were created before the failure
      _RemoveDisks(iobj, self.cfg)
      raise errors.OpExecError("Device creation failed, reverting...")

    feedback_fn("adding instance %s to cluster config" % instance)

    self.cfg.AddInstance(iobj)
    # Declare that we don't want to remove the instance lock anymore, as we've
    # added the instance to the config
    del self.remove_locks[locking.LEVEL_INSTANCE]

    if self.op.wait_for_sync:
      disk_abort = not _WaitForSync(self.cfg, iobj, self.proc)
    elif iobj.disk_template in constants.DTS_NET_MIRROR:
      # make sure the disks are not degraded (still sync-ing is ok)
      time.sleep(15)
      feedback_fn("* checking mirrors status")
      disk_abort = not _WaitForSync(self.cfg, iobj, self.proc, oneshot=True)
    else:
      disk_abort = False

    if disk_abort:
      _RemoveDisks(iobj, self.cfg)
      self.cfg.RemoveInstance(iobj.name)
      # Make sure the instance lock gets removed
      self.remove_locks[locking.LEVEL_INSTANCE] = iobj.name
      raise errors.OpExecError("There are some degraded disks for"
                               " this instance")

    feedback_fn("creating os for instance %s on node %s" %
                (instance, pnode_name))

    if iobj.disk_template != constants.DT_DISKLESS:
      if self.op.mode == constants.INSTANCE_CREATE:
        feedback_fn("* running the instance OS create scripts...")
        if not rpc.call_instance_os_add(pnode_name, iobj, "sda", "sdb"):
          raise errors.OpExecError("could not add os for instance %s"
                                   " on node %s" %
                                   (instance, pnode_name))

      elif self.op.mode == constants.INSTANCE_IMPORT:
        feedback_fn("* running the instance OS import scripts...")
        src_node = self.op.src_node
        src_image = self.src_image
        if not rpc.call_instance_os_import(pnode_name, iobj, "sda", "sdb",
                                           src_node, src_image):
          raise errors.OpExecError("Could not import os for instance"
                                   " %s on node %s" %
                                   (instance, pnode_name))
      else:
        # also checked in the prereq part
        raise errors.ProgrammerError("Unknown OS initialization mode '%s'"
                                     % self.op.mode)

    if self.op.start:
      logger.Info("starting instance %s on node %s" % (instance, pnode_name))
      feedback_fn("* starting instance...")
      if not rpc.call_instance_start(pnode_name, iobj, None):
        raise errors.OpExecError("Could not start instance")
3557 a8083063 Iustin Pop
3558 a8083063 Iustin Pop
class LUConnectConsole(NoHooksLU):
  """Connect to an instance's console.

  This is somewhat special in that it returns the command line that
  you need to run on the master node in order to connect to the
  console.

  """
  _OP_REQP = ["instance_name"]
  REQ_BGL = False

  def ExpandNames(self):
    self._ExpandAndLockInstance()

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster.

    """
    self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
    assert self.instance is not None, \
      "Cannot retrieve locked instance %s" % self.op.instance_name

  def Exec(self, feedback_fn):
    """Connect to the console of an instance

    """
    inst = self.instance
    pnode = inst.primary_node

    # ask the primary node which instances it is currently running
    running = rpc.call_instance_list([pnode])[pnode]
    if running is False:
      raise errors.OpExecError("Can't connect to node %s." % pnode)

    if inst.name not in running:
      raise errors.OpExecError("Instance %s is not running." % inst.name)

    logger.Debug("connecting to console of %s on %s" % (inst.name, pnode))

    console_cmd = hypervisor.GetHypervisor().GetShellCommandForConsole(inst)

    # build ssh cmdline; the caller is expected to run this on the master
    return self.ssh.BuildCmd(pnode, "root", console_cmd, batch=True, tty=True)
3603 a8083063 Iustin Pop
3604 a8083063 Iustin Pop
3605 a8083063 Iustin Pop
class LUReplaceDisks(LogicalUnit):
3606 a8083063 Iustin Pop
  """Replace the disks of an instance.
3607 a8083063 Iustin Pop

3608 a8083063 Iustin Pop
  """
3609 a8083063 Iustin Pop
  HPATH = "mirrors-replace"
3610 a8083063 Iustin Pop
  HTYPE = constants.HTYPE_INSTANCE
3611 a9e0c397 Iustin Pop
  _OP_REQP = ["instance_name", "mode", "disks"]
3612 efd990e4 Guido Trotter
  REQ_BGL = False
3613 efd990e4 Guido Trotter
3614 efd990e4 Guido Trotter
  def ExpandNames(self):
    """Expand and lock the instance; choose the node-locking strategy.

    Three mutually exclusive cases for the node level:
      - an iallocator was given: the allocator may pick any node, so all
        node locks are requested (locking.ALL_SET)
      - an explicit remote_node was given: lock it now, and mark the lock
        set for LOCKS_APPEND so the instance's own nodes get added later
      - neither: start empty and mark LOCKS_REPLACE so the instance's own
        nodes are computed later

    """
    self._ExpandAndLockInstance()

    # normalize a missing remote_node attribute to None
    if not hasattr(self.op, "remote_node"):
      self.op.remote_node = None

    ia_name = getattr(self.op, "iallocator", None)
    if ia_name is not None:
      # an iallocator and an explicit new secondary are mutually exclusive
      if self.op.remote_node is not None:
        raise errors.OpPrereqError("Give either the iallocator or the new"
                                   " secondary, not both")
      self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
    elif self.op.remote_node is not None:
      remote_node = self.cfg.ExpandNodeName(self.op.remote_node)
      if remote_node is None:
        raise errors.OpPrereqError("Node '%s' not known" %
                                   self.op.remote_node)
      # store the expanded (canonical) name back into the opcode
      self.op.remote_node = remote_node
      self.needed_locks[locking.LEVEL_NODE] = [remote_node]
      self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_APPEND
    else:
      self.needed_locks[locking.LEVEL_NODE] = []
      self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
3637 efd990e4 Guido Trotter
3638 efd990e4 Guido Trotter
  def DeclareLocks(self, level):
    """Declare the instance's nodes at the node locking level.

    If we're not already locking all nodes in the set (the iallocator
    case uses ALL_SET), the instance's primary/secondary nodes have to
    be declared here.

    """
    if level != locking.LEVEL_NODE:
      return
    if self.needed_locks[locking.LEVEL_NODE] is locking.ALL_SET:
      return
    self._LockInstancesNodes()
3644 a8083063 Iustin Pop
3645 b6e82a65 Iustin Pop
  def _RunAllocator(self):
    """Compute a new secondary node using an IAllocator.

    On success, stores the chosen node in self.op.remote_node.

    Raises:
      errors.OpPrereqError: if the allocator fails or returns an
        unexpected number of nodes

    """
    ial = IAllocator(self.cfg, self.sstore,
                     mode=constants.IALLOCATOR_MODE_RELOC,
                     name=self.op.instance_name,
                     relocate_from=[self.sec_node])

    ial.Run(self.op.iallocator)

    if not ial.success:
      raise errors.OpPrereqError("Can't compute nodes using"
                                 " iallocator '%s': %s" % (self.op.iallocator,
                                                           ial.info))
    if len(ial.nodes) != ial.required_nodes:
      # BUGFIX: the format string has three placeholders but only two
      # arguments were supplied, so raising this error crashed with a
      # TypeError; the iallocator name was the missing first argument
      raise errors.OpPrereqError("iallocator '%s' returned invalid number"
                                 " of nodes (%s), required %s" %
                                 (self.op.iallocator, len(ial.nodes),
                                  ial.required_nodes))
    self.op.remote_node = ial.nodes[0]
    logger.ToStdout("Selected new secondary for the instance: %s" %
                    self.op.remote_node)
3667 b6e82a65 Iustin Pop
3668 a8083063 Iustin Pop
  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on the master, the primary and all the secondaries.

    """
    instance = self.instance
    # replace-specific variables first; the generic per-instance
    # variables are merged on top
    env = {
      "MODE": self.op.mode,
      "NEW_SECONDARY": self.op.remote_node,
      "OLD_SECONDARY": instance.secondary_nodes[0],
      }
    env.update(_BuildInstanceHookEnvByObject(instance))

    nodes = [self.sstore.GetMasterNode(), instance.primary_node]
    remote = self.op.remote_node
    if remote is not None:
      nodes.append(remote)
    # same node list for both pre- and post-hooks
    return env, nodes, nodes
3687 a8083063 Iustin Pop
3688 a8083063 Iustin Pop
  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster, uses a network
    mirrored disk template with exactly one secondary, validates the
    requested replacement mode against the given remote node, and
    computes self.tgt_node/self.oth_node (and self.new_node for
    secondary replacement).

    """
    instance = self.cfg.GetInstanceInfo(self.op.instance_name)
    assert instance is not None, \
      "Cannot retrieve locked instance %s" % self.op.instance_name
    self.instance = instance

    if instance.disk_template not in constants.DTS_NET_MIRROR:
      raise errors.OpPrereqError("Instance's disk layout is not"
                                 " network mirrored.")

    if len(instance.secondary_nodes) != 1:
      raise errors.OpPrereqError("The instance has a strange layout,"
                                 " expected one secondary but found %d" %
                                 len(instance.secondary_nodes))

    self.sec_node = instance.secondary_nodes[0]

    # if an iallocator was requested, it fills in self.op.remote_node
    ia_name = getattr(self.op, "iallocator", None)
    if ia_name is not None:
      self._RunAllocator()

    remote_node = self.op.remote_node
    if remote_node is not None:
      self.remote_node_info = self.cfg.GetNodeInfo(remote_node)
      assert self.remote_node_info is not None, \
        "Cannot retrieve locked node %s" % remote_node
    else:
      self.remote_node_info = None
    if remote_node == instance.primary_node:
      raise errors.OpPrereqError("The specified node is the primary node of"
                                 " the instance.")
    elif remote_node == self.sec_node:
      if self.op.mode == constants.REPLACE_DISK_SEC:
        # this is for DRBD8, where we can't execute the same mode of
        # replacement as for drbd7 (no different port allocated)
        raise errors.OpPrereqError("Same secondary given, cannot execute"
                                   " replacement")
    if instance.disk_template == constants.DT_DRBD8:
      if (self.op.mode == constants.REPLACE_DISK_ALL and
          remote_node is not None):
        # switch to replace secondary mode
        self.op.mode = constants.REPLACE_DISK_SEC

      if self.op.mode == constants.REPLACE_DISK_ALL:
        raise errors.OpPrereqError("Template 'drbd' only allows primary or"
                                   " secondary disk replacement, not"
                                   " both at once")
      elif self.op.mode == constants.REPLACE_DISK_PRI:
        if remote_node is not None:
          raise errors.OpPrereqError("Template 'drbd' does not allow changing"
                                     " the secondary while doing a primary"
                                     " node disk replacement")
        # replacing on the primary: the secondary is the "other" node
        self.tgt_node = instance.primary_node
        self.oth_node = instance.secondary_nodes[0]
      elif self.op.mode == constants.REPLACE_DISK_SEC:
        self.new_node = remote_node # this can be None, in which case
                                    # we don't change the secondary
        self.tgt_node = instance.secondary_nodes[0]
        self.oth_node = instance.primary_node
      else:
        raise errors.ProgrammerError("Unhandled disk replace mode")

    # finally, verify that every requested disk name exists
    for name in self.op.disks:
      if instance.FindDisk(name) is None:
        raise errors.OpPrereqError("Disk '%s' not found for instance '%s'" %
                                   (name, instance.name))
3759 a8083063 Iustin Pop
3760 a9e0c397 Iustin Pop
  def _ExecD8DiskOnly(self, feedback_fn):
    """Replace a disk on the primary or secondary for dbrd8.

    The algorithm for replace is quite complicated:
      - for each disk to be replaced:
        - create new LVs on the target node with unique names
        - detach old LVs from the drbd device
        - rename old LVs to name_replaced.<time_t>
        - rename new LVs to old LVs
        - attach the new LVs (with the old names now) to the drbd device
      - wait for sync across all devices
      - for each modified disk:
        - remove old LVs (which have the name name_replaces.<time_t>)

    Failures are not very well handled.

    @param feedback_fn: unused here; progress is reported via self.proc

    """
    steps_total = 6
    warning, info = (self.proc.LogWarning, self.proc.LogInfo)
    instance = self.instance
    # maps disk iv_name -> (drbd device, old LV children, new LV children)
    iv_names = {}
    vgname = self.cfg.GetVGName()
    # start of work
    cfg = self.cfg
    tgt_node = self.tgt_node
    oth_node = self.oth_node

    # Step: check device activation
    self.proc.LogStep(1, steps_total, "check device existence")
    info("checking volume groups")
    my_vg = cfg.GetVGName()
    results = rpc.call_vg_list([oth_node, tgt_node])
    if not results:
      raise errors.OpExecError("Can't list volume groups on the nodes")
    for node in oth_node, tgt_node:
      res = results.get(node, False)
      if not res or my_vg not in res:
        raise errors.OpExecError("Volume group '%s' not found on %s" %
                                 (my_vg, node))
    for dev in instance.disks:
      if not dev.iv_name in self.op.disks:
        continue
      for node in tgt_node, oth_node:
        info("checking %s on %s" % (dev.iv_name, node))
        cfg.SetDiskID(dev, node)
        if not rpc.call_blockdev_find(node, dev):
          raise errors.OpExecError("Can't find device %s on node %s" %
                                   (dev.iv_name, node))

    # Step: check other node consistency
    self.proc.LogStep(2, steps_total, "check peer consistency")
    for dev in instance.disks:
      if not dev.iv_name in self.op.disks:
        continue
      info("checking %s consistency on %s" % (dev.iv_name, oth_node))
      if not _CheckDiskConsistency(self.cfg, dev, oth_node,
                                   oth_node==instance.primary_node):
        raise errors.OpExecError("Peer node (%s) has degraded storage, unsafe"
                                 " to replace disks on this node (%s)" %
                                 (oth_node, tgt_node))

    # Step: create new storage
    self.proc.LogStep(3, steps_total, "allocate new storage")
    for dev in instance.disks:
      if not dev.iv_name in self.op.disks:
        continue
      size = dev.size
      cfg.SetDiskID(dev, tgt_node)
      lv_names = [".%s_%s" % (dev.iv_name, suf) for suf in ["data", "meta"]]
      names = _GenerateUniqueNames(cfg, lv_names)
      lv_data = objects.Disk(dev_type=constants.LD_LV, size=size,
                             logical_id=(vgname, names[0]))
      # the DRBD metadata volume has a fixed 128 MiB size
      lv_meta = objects.Disk(dev_type=constants.LD_LV, size=128,
                             logical_id=(vgname, names[1]))
      new_lvs = [lv_data, lv_meta]
      old_lvs = dev.children
      iv_names[dev.iv_name] = (dev, old_lvs, new_lvs)
      info("creating new local storage on %s for %s" %
           (tgt_node, dev.iv_name))
      # since we *always* want to create this LV, we use the
      # _Create...OnPrimary (which forces the creation), even if we
      # are talking about the secondary node
      for new_lv in new_lvs:
        if not _CreateBlockDevOnPrimary(cfg, tgt_node, instance, new_lv,
                                        _GetInstanceInfoText(instance)):
          raise errors.OpExecError("Failed to create new LV named '%s' on"
                                   " node '%s'" %
                                   (new_lv.logical_id[1], tgt_node))

    # Step: for each lv, detach+rename*2+attach
    self.proc.LogStep(4, steps_total, "change drbd configuration")
    for dev, old_lvs, new_lvs in iv_names.itervalues():
      info("detaching %s drbd from local storage" % dev.iv_name)
      if not rpc.call_blockdev_removechildren(tgt_node, dev, old_lvs):
        raise errors.OpExecError("Can't detach drbd from local storage on node"
                                 " %s for device %s" % (tgt_node, dev.iv_name))
      #dev.children = []
      #cfg.Update(instance)

      # ok, we created the new LVs, so now we know we have the needed
      # storage; as such, we proceed on the target node to rename
      # old_lv to _old, and new_lv to old_lv; note that we rename LVs
      # using the assumption that logical_id == physical_id (which in
      # turn is the unique_id on that node)

      # FIXME(iustin): use a better name for the replaced LVs
      temp_suffix = int(time.time())
      ren_fn = lambda d, suff: (d.physical_id[0],
                                d.physical_id[1] + "_replaced-%s" % suff)
      # build the rename list based on what LVs exist on the node
      rlist = []
      for to_ren in old_lvs:
        find_res = rpc.call_blockdev_find(tgt_node, to_ren)
        if find_res is not None: # device exists
          rlist.append((to_ren, ren_fn(to_ren, temp_suffix)))

      info("renaming the old LVs on the target node")
      if not rpc.call_blockdev_rename(tgt_node, rlist):
        raise errors.OpExecError("Can't rename old LVs on node %s" % tgt_node)
      # now we rename the new LVs to the old LVs
      info("renaming the new LVs on the target node")
      rlist = [(new, old.physical_id) for old, new in zip(old_lvs, new_lvs)]
      if not rpc.call_blockdev_rename(tgt_node, rlist):
        raise errors.OpExecError("Can't rename new LVs on node %s" % tgt_node)

      # record the new names in the configuration objects
      for old, new in zip(old_lvs, new_lvs):
        new.logical_id = old.logical_id
        cfg.SetDiskID(new, tgt_node)

      for disk in old_lvs:
        disk.logical_id = ren_fn(disk, temp_suffix)
        cfg.SetDiskID(disk, tgt_node)

      # now that the new lvs have the old name, we can add them to the device
      info("adding new mirror component on %s" % tgt_node)
      if not rpc.call_blockdev_addchildren(tgt_node, dev, new_lvs):
        for new_lv in new_lvs:
          if not rpc.call_blockdev_remove(tgt_node, new_lv):
            # BUGFIX: the original passed the bare format string
            # "Can't rollback device %s" with no argument for the
            # placeholder; interpolate the device name explicitly
            warning("Can't rollback device %s" % dev.iv_name,
                    hint="manually cleanup unused"
                    " logical volumes")
        raise errors.OpExecError("Can't add local storage to drbd")

      dev.children = new_lvs
      cfg.Update(instance)

    # Step: wait for sync

    # this can fail as the old devices are degraded and _WaitForSync
    # does a combined result over all disks, so we don't check its
    # return value
    self.proc.LogStep(5, steps_total, "sync devices")
    _WaitForSync(cfg, instance, self.proc, unlock=True)

    # so check manually all the devices
    for name, (dev, old_lvs, new_lvs) in iv_names.iteritems():
      cfg.SetDiskID(dev, instance.primary_node)
      # index 5 of the blockdev_find result is the degraded flag
      # (presumably -- TODO confirm against backend.FindBlockDevice)
      is_degr = rpc.call_blockdev_find(instance.primary_node, dev)[5]
      if is_degr:
        raise errors.OpExecError("DRBD device %s is degraded!" % name)

    # Step: remove old storage
    self.proc.LogStep(6, steps_total, "removing old storage")
    for name, (dev, old_lvs, new_lvs) in iv_names.iteritems():
      info("remove logical volumes for %s" % name)
      for lv in old_lvs:
        cfg.SetDiskID(lv, tgt_node)
        if not rpc.call_blockdev_remove(tgt_node, lv):
          warning("Can't remove old LV", hint="manually remove unused LVs")
          continue
3929 a9e0c397 Iustin Pop
3930 a9e0c397 Iustin Pop
  def _ExecD8Secondary(self, feedback_fn):
3931 a9e0c397 Iustin Pop
    """Replace the secondary node for drbd8.
3932 a9e0c397 Iustin Pop

3933 a9e0c397 Iustin Pop
    The algorithm for replace is quite complicated:
3934 a9e0c397 Iustin Pop
      - for all disks of the instance:
3935 a9e0c397 Iustin Pop
        - create new LVs on the new node with same names
3936 a9e0c397 Iustin Pop
        - shutdown the drbd device on the old secondary
3937 a9e0c397 Iustin Pop
        - disconnect the drbd network on the primary
3938 a9e0c397 Iustin Pop
        - create the drbd device on the new secondary
3939 a9e0c397 Iustin Pop
        - network attach the drbd on the primary, using an artifice:
3940 a9e0c397 Iustin Pop
          the drbd code for Attach() will connect to the network if it
3941 a9e0c397 Iustin Pop
          finds a device which is connected to the good local disks but
3942 a9e0c397 Iustin Pop
          not network enabled
3943 a9e0c397 Iustin Pop
      - wait for sync across all devices
3944 a9e0c397 Iustin Pop
      - remove all disks from the old secondary
3945 a9e0c397 Iustin Pop

3946 a9e0c397 Iustin Pop
    Failures are not very well handled.
3947 0834c866 Iustin Pop

3948 a9e0c397 Iustin Pop
    """
3949 0834c866 Iustin Pop
    steps_total = 6
3950 5bfac263 Iustin Pop
    warning, info = (self.proc.LogWarning, self.proc.LogInfo)
3951 a9e0c397 Iustin Pop
    instance = self.instance
3952 a9e0c397 Iustin Pop
    iv_names = {}
3953 a9e0c397 Iustin Pop
    vgname = self.cfg.GetVGName()
3954 a9e0c397 Iustin Pop
    # start of work
3955 a9e0c397 Iustin Pop
    cfg = self.cfg
3956 a9e0c397 Iustin Pop
    old_node = self.tgt_node
3957 a9e0c397 Iustin Pop
    new_node = self.new_node
3958 a9e0c397 Iustin Pop
    pri_node = instance.primary_node
3959 0834c866 Iustin Pop
3960 0834c866 Iustin Pop
    # Step: check device activation
3961 5bfac263 Iustin Pop
    self.proc.LogStep(1, steps_total, "check device existence")
3962 0834c866 Iustin Pop
    info("checking volume groups")
3963 0834c866 Iustin Pop
    my_vg = cfg.GetVGName()
3964 0834c866 Iustin Pop
    results = rpc.call_vg_list([pri_node, new_node])
3965 0834c866 Iustin Pop
    if not results:
3966 0834c866 Iustin Pop
      raise errors.OpExecError("Can't list volume groups on the nodes")
3967 0834c866 Iustin Pop
    for node in pri_node, new_node:
3968 0834c866 Iustin Pop
      res = results.get(node, False)
3969 0834c866 Iustin Pop
      if not res or my_vg not in res:
3970 0834c866 Iustin Pop
        raise errors.OpExecError("Volume group '%s' not found on %s" %
3971 0834c866 Iustin Pop
                                 (my_vg, node))
3972 0834c866 Iustin Pop
    for dev in instance.disks:
3973 0834c866 Iustin Pop
      if not dev.iv_name in self.op.disks:
3974 0834c866 Iustin Pop
        continue
3975 0834c866 Iustin Pop
      info("checking %s on %s" % (dev.iv_name, pri_node))
3976 0834c866 Iustin Pop
      cfg.SetDiskID(dev, pri_node)
3977 0834c866 Iustin Pop
      if not rpc.call_blockdev_find(pri_node, dev):
3978 0834c866 Iustin Pop
        raise errors.OpExecError("Can't find device %s on node %s" %
3979 0834c866 Iustin Pop
                                 (dev.iv_name, pri_node))
3980 0834c866 Iustin Pop
3981 0834c866 Iustin Pop
    # Step: check other node consistency
3982 5bfac263 Iustin Pop
    self.proc.LogStep(2, steps_total, "check peer consistency")
3983 0834c866 Iustin Pop
    for dev in instance.disks:
3984 0834c866 Iustin Pop
      if not dev.iv_name in self.op.disks:
3985 0834c866 Iustin Pop
        continue
3986 0834c866 Iustin Pop
      info("checking %s consistency on %s" % (dev.iv_name, pri_node))
3987 0834c866 Iustin Pop
      if not _CheckDiskConsistency(self.cfg, dev, pri_node, True, ldisk=True):
3988 0834c866 Iustin Pop
        raise errors.OpExecError("Primary node (%s) has degraded storage,"
3989 0834c866 Iustin Pop
                                 " unsafe to replace the secondary" %
3990 0834c866 Iustin Pop
                                 pri_node)
3991 0834c866 Iustin Pop
3992 0834c866 Iustin Pop
    # Step: create new storage
3993 5bfac263 Iustin Pop
    self.proc.LogStep(3, steps_total, "allocate new storage")
3994 a9e0c397 Iustin Pop
    for dev in instance.disks:
3995 a9e0c397 Iustin Pop
      size = dev.size
3996 0834c866 Iustin Pop
      info("adding new local storage on %s for %s" % (new_node, dev.iv_name))
3997 a9e0c397 Iustin Pop
      # since we *always* want to create this LV, we use the
3998 a9e0c397 Iustin Pop
      # _Create...OnPrimary (which forces the creation), even if we
3999 a9e0c397 Iustin Pop
      # are talking about the secondary node
4000 a9e0c397 Iustin Pop
      for new_lv in dev.children:
4001 3f78eef2 Iustin Pop
        if not _CreateBlockDevOnPrimary(cfg, new_node, instance, new_lv,
4002 a9e0c397 Iustin Pop
                                        _GetInstanceInfoText(instance)):
4003 a9e0c397 Iustin Pop
          raise errors.OpExecError("Failed to create new LV named '%s' on"
4004 a9e0c397 Iustin Pop
                                   " node '%s'" %
4005 a9e0c397 Iustin Pop
                                   (new_lv.logical_id[1], new_node))
4006 a9e0c397 Iustin Pop
4007 0834c866 Iustin Pop
      iv_names[dev.iv_name] = (dev, dev.children)
4008 0834c866 Iustin Pop
4009 5bfac263 Iustin Pop
    self.proc.LogStep(4, steps_total, "changing drbd configuration")
4010 0834c866 Iustin Pop
    for dev in instance.disks:
4011 0834c866 Iustin Pop
      size = dev.size
4012 0834c866 Iustin Pop
      info("activating a new drbd on %s for %s" % (new_node, dev.iv_name))
4013 a9e0c397 Iustin Pop
      # create new devices on new_node
4014 a9e0c397 Iustin Pop
      new_drbd = objects.Disk(dev_type=constants.LD_DRBD8,
4015 a9e0c397 Iustin Pop
                              logical_id=(pri_node, new_node,
4016 a9e0c397 Iustin Pop
                                          dev.logical_id[2]),
4017 a9e0c397 Iustin Pop
                              children=dev.children)
4018 3f78eef2 Iustin Pop
      if not _CreateBlockDevOnSecondary(cfg, new_node, instance,
4019 3f78eef2 Iustin Pop
                                        new_drbd, False,
4020 a9e0c397 Iustin Pop
                                      _GetInstanceInfoText(instance)):
4021 a9e0c397 Iustin Pop
        raise errors.OpExecError("Failed to create new DRBD on"
4022 a9e0c397 Iustin Pop
                                 " node '%s'" % new_node)
4023 a9e0c397 Iustin Pop
4024 0834c866 Iustin Pop
    for dev in instance.disks:
4025 a9e0c397 Iustin Pop
      # we have new devices, shutdown the drbd on the old secondary
4026 0834c866 Iustin Pop
      info("shutting down drbd for %s on old node" % dev.iv_name)
4027 a9e0c397 Iustin Pop
      cfg.SetDiskID(dev, old_node)
4028 a9e0c397 Iustin Pop
      if not rpc.call_blockdev_shutdown(old_node, dev):
4029 0834c866 Iustin Pop
        warning("Failed to shutdown drbd for %s on old node" % dev.iv_name,
4030 79caa9ed Guido Trotter
                hint="Please cleanup this device manually as soon as possible")
4031 a9e0c397 Iustin Pop
4032 642445d9 Iustin Pop
    info("detaching primary drbds from the network (=> standalone)")
4033 642445d9 Iustin Pop
    done = 0
4034 642445d9 Iustin Pop
    for dev in instance.disks:
4035 a9e0c397 Iustin Pop
      cfg.SetDiskID(dev, pri_node)
4036 642445d9 Iustin Pop
      # set the physical (unique in bdev terms) id to None, meaning
4037 642445d9 Iustin Pop
      # detach from network
4038 642445d9 Iustin Pop
      dev.physical_id = (None,) * len(dev.physical_id)
4039 642445d9 Iustin Pop
      # and 'find' the device, which will 'fix' it to match the
4040 642445d9 Iustin Pop
      # standalone state
4041 642445d9 Iustin Pop
      if rpc.call_blockdev_find(pri_node, dev):
4042 642445d9 Iustin Pop
        done += 1
4043 642445d9 Iustin Pop
      else:
4044 642445d9 Iustin Pop
        warning("Failed to detach drbd %s from network, unusual case" %
4045 642445d9 Iustin Pop
                dev.iv_name)
4046 642445d9 Iustin Pop
4047 642445d9 Iustin Pop
    if not done:
4048 642445d9 Iustin Pop
      # no detaches succeeded (very unlikely)
4049 642445d9 Iustin Pop
      raise errors.OpExecError("Can't detach at least one DRBD from old node")
4050 642445d9 Iustin Pop
4051 642445d9 Iustin Pop
    # if we managed to detach at least one, we update all the disks of
4052 642445d9 Iustin Pop
    # the instance to point to the new secondary
4053 642445d9 Iustin Pop
    info("updating instance configuration")
4054 642445d9 Iustin Pop
    for dev in instance.disks:
4055 642445d9 Iustin Pop
      dev.logical_id = (pri_node, new_node) + dev.logical_id[2:]
4056 a9e0c397 Iustin Pop
      cfg.SetDiskID(dev, pri_node)
4057 642445d9 Iustin Pop
    cfg.Update(instance)
4058 a9e0c397 Iustin Pop
4059 642445d9 Iustin Pop
    # and now perform the drbd attach
4060 642445d9 Iustin Pop
    info("attaching primary drbds to new secondary (standalone => connected)")
4061 642445d9 Iustin Pop
    failures = []
4062 642445d9 Iustin Pop
    for dev in instance.disks:
4063 642445d9 Iustin Pop
      info("attaching primary drbd for %s to new secondary node" % dev.iv_name)
4064 642445d9 Iustin Pop
      # since the attach is smart, it's enough to 'find' the device,
4065 642445d9 Iustin Pop
      # it will automatically activate the network, if the physical_id
4066 642445d9 Iustin Pop
      # is correct
4067 642445d9 Iustin Pop
      cfg.SetDiskID(dev, pri_node)
4068 642445d9 Iustin Pop
      if not rpc.call_blockdev_find(pri_node, dev):
4069 642445d9 Iustin Pop
        warning("can't attach drbd %s to new secondary!" % dev.iv_name,
4070 642445d9 Iustin Pop
                "please do a gnt-instance info to see the status of disks")
4071 a9e0c397 Iustin Pop
4072 a9e0c397 Iustin Pop
    # this can fail as the old devices are degraded and _WaitForSync
4073 a9e0c397 Iustin Pop
    # does a combined result over all disks, so we don't check its
4074 a9e0c397 Iustin Pop
    # return value
4075 5bfac263 Iustin Pop
    self.proc.LogStep(5, steps_total, "sync devices")
4076 5bfac263 Iustin Pop
    _WaitForSync(cfg, instance, self.proc, unlock=True)
4077 a9e0c397 Iustin Pop
4078 a9e0c397 Iustin Pop
    # so check manually all the devices
4079 a9e0c397 Iustin Pop
    for name, (dev, old_lvs) in iv_names.iteritems():
4080 a9e0c397 Iustin Pop
      cfg.SetDiskID(dev, pri_node)
4081 a9e0c397 Iustin Pop
      is_degr = rpc.call_blockdev_find(pri_node, dev)[5]
4082 a9e0c397 Iustin Pop
      if is_degr:
4083 a9e0c397 Iustin Pop
        raise errors.OpExecError("DRBD device %s is degraded!" % name)
4084 a9e0c397 Iustin Pop
4085 5bfac263 Iustin Pop
    self.proc.LogStep(6, steps_total, "removing old storage")
4086 a9e0c397 Iustin Pop
    for name, (dev, old_lvs) in iv_names.iteritems():
4087 0834c866 Iustin Pop
      info("remove logical volumes for %s" % name)
4088 a9e0c397 Iustin Pop
      for lv in old_lvs:
4089 a9e0c397 Iustin Pop
        cfg.SetDiskID(lv, old_node)
4090 a9e0c397 Iustin Pop
        if not rpc.call_blockdev_remove(old_node, lv):
4091 0834c866 Iustin Pop
          warning("Can't remove LV on old secondary",
4092 79caa9ed Guido Trotter
                  hint="Cleanup stale volumes by hand")
4093 a9e0c397 Iustin Pop
4094 a9e0c397 Iustin Pop
  def Exec(self, feedback_fn):
    """Execute disk replacement.

    This dispatches the disk replacement to the appropriate handler.

    """
    instance = self.instance

    # A down instance has inactive disks; bring them up for the
    # duration of the replacement and shut them down again afterwards.
    if instance.status == "down":
      _StartInstanceDisks(self.cfg, instance, True)

    # Only the DRBD8 template is handled here; anything else reaching
    # this point is a programming error, not a user error.
    if instance.disk_template != constants.DT_DRBD8:
      raise errors.ProgrammerError("Unhandled disk replacement case")
    if self.op.remote_node is None:
      handler = self._ExecD8DiskOnly
    else:
      handler = self._ExecD8Secondary

    result = handler(feedback_fn)

    # Restore the original (down) state of the disks.
    if instance.status == "down":
      _SafeShutdownInstanceDisks(instance, self.cfg)

    return result
4121 a9e0c397 Iustin Pop
4122 a8083063 Iustin Pop
4123 8729e0d7 Iustin Pop
class LUGrowDisk(LogicalUnit):
  """Grow a disk of an instance.

  """
  HPATH = "disk-grow"
  HTYPE = constants.HTYPE_INSTANCE
  _OP_REQP = ["instance_name", "disk", "amount"]
  REQ_BGL = False

  def ExpandNames(self):
    # lock the instance and, once known, all of its nodes
    self._ExpandAndLockInstance()
    self.needed_locks[locking.LEVEL_NODE] = []
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE

  def DeclareLocks(self, level):
    if level == locking.LEVEL_NODE:
      self._LockInstancesNodes()

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on the master, the primary and all the secondaries.

    """
    env = {
      "DISK": self.op.disk,
      "AMOUNT": self.op.amount,
      }
    env.update(_BuildInstanceHookEnvByObject(self.instance))
    nl = [
      self.sstore.GetMasterNode(),
      self.instance.primary_node,
      ]
    return env, nl, nl

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster, that its disk
    template supports growing, that the requested amount is strictly
    positive, and that every node holding a copy of the disk has
    enough free space in the volume group.

    """
    # a zero or negative amount would pass all the checks below and
    # end up as a nonsensical (or config-corrupting) grow request
    if self.op.amount <= 0:
      raise errors.OpPrereqError("Invalid grow-disk amount '%s', must be"
                                 " strictly positive" % self.op.amount)

    instance = self.cfg.GetInstanceInfo(self.op.instance_name)
    assert instance is not None, \
      "Cannot retrieve locked instance %s" % self.op.instance_name

    self.instance = instance

    if instance.disk_template not in (constants.DT_PLAIN, constants.DT_DRBD8):
      raise errors.OpPrereqError("Instance's disk layout does not support"
                                 " growing.")

    if instance.FindDisk(self.op.disk) is None:
      raise errors.OpPrereqError("Disk '%s' not found for instance '%s'" %
                                 (self.op.disk, instance.name))

    # every node with a copy of the disk must have 'amount' MiB free in
    # the volume group
    nodenames = [instance.primary_node] + list(instance.secondary_nodes)
    nodeinfo = rpc.call_node_info(nodenames, self.cfg.GetVGName())
    for node in nodenames:
      info = nodeinfo.get(node, None)
      if not info:
        raise errors.OpPrereqError("Cannot get current information"
                                   " from node '%s'" % node)
      vg_free = info.get('vg_free', None)
      if not isinstance(vg_free, int):
        raise errors.OpPrereqError("Can't compute free disk space on"
                                   " node %s" % node)
      if self.op.amount > vg_free:
        raise errors.OpPrereqError("Not enough disk space on target node %s:"
                                   " %d MiB available, %d MiB required" %
                                   (node, vg_free, self.op.amount))

  def Exec(self, feedback_fn):
    """Execute disk grow.

    Grows the disk on all nodes (secondaries first, then the primary)
    and records the new size in the configuration.

    """
    instance = self.instance
    disk = instance.FindDisk(self.op.disk)
    for node in (instance.secondary_nodes + (instance.primary_node,)):
      self.cfg.SetDiskID(disk, node)
      result = rpc.call_blockdev_grow(node, disk, self.op.amount)
      # the RPC is expected to return a (success, message) pair
      if not result or not isinstance(result, (list, tuple)) or len(result) != 2:
        raise errors.OpExecError("grow request failed to node %s" % node)
      elif not result[0]:
        raise errors.OpExecError("grow request failed to node %s: %s" %
                                 (node, result[1]))
    disk.RecordGrow(self.op.amount)
    self.cfg.Update(instance)
    return
4211 8729e0d7 Iustin Pop
4212 8729e0d7 Iustin Pop
4213 a8083063 Iustin Pop
class LUQueryInstanceData(NoHooksLU):
  """Query runtime instance data.

  """
  _OP_REQP = ["instances"]
  REQ_BGL = False

  def ExpandNames(self):
    self.needed_locks = {}
    # read-only operation: all locks are acquired in shared mode
    self.share_locks = dict(((i, 1) for i in locking.LEVELS))

    if not isinstance(self.op.instances, list):
      raise errors.OpPrereqError("Invalid argument type 'instances'")

    if self.op.instances:
      self.wanted_names = []
      for name in self.op.instances:
        full_name = self.cfg.ExpandInstanceName(name)
        if full_name is None:
          # FIX: this opcode has no 'instance_name' attribute, so the
          # old "% self.op.instance_name" raised AttributeError instead
          # of the intended OpPrereqError; use the loop variable
          raise errors.OpPrereqError("Instance '%s' not known" % name)
        self.wanted_names.append(full_name)
      self.needed_locks[locking.LEVEL_INSTANCE] = self.wanted_names
    else:
      # no explicit list: operate on all instances
      self.wanted_names = None
      self.needed_locks[locking.LEVEL_INSTANCE] = locking.ALL_SET

    self.needed_locks[locking.LEVEL_NODE] = []
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE

  def DeclareLocks(self, level):
    if level == locking.LEVEL_NODE:
      self._LockInstancesNodes()

  def CheckPrereq(self):
    """Check prerequisites.

    This only checks the optional instance list against the existing names.

    """
    if self.wanted_names is None:
      # wildcard query: the acquired instance locks are the full list
      self.wanted_names = self.acquired_locks[locking.LEVEL_INSTANCE]

    self.wanted_instances = [self.cfg.GetInstanceInfo(name) for name
                             in self.wanted_names]
    return

  def _ComputeDiskStatus(self, instance, snode, dev):
    """Compute block device status.

    Returns a dict describing the device and, recursively, its
    children; 'pstatus' and 'sstatus' hold the blockdev_find results
    from the primary and secondary node respectively.

    """
    self.cfg.SetDiskID(dev, instance.primary_node)
    dev_pstatus = rpc.call_blockdev_find(instance.primary_node, dev)
    if dev.dev_type in constants.LDS_DRBD:
      # we change the snode then (otherwise we use the one passed in)
      if dev.logical_id[0] == instance.primary_node:
        snode = dev.logical_id[1]
      else:
        snode = dev.logical_id[0]

    if snode:
      self.cfg.SetDiskID(dev, snode)
      dev_sstatus = rpc.call_blockdev_find(snode, dev)
    else:
      dev_sstatus = None

    if dev.children:
      dev_children = [self._ComputeDiskStatus(instance, snode, child)
                      for child in dev.children]
    else:
      dev_children = []

    data = {
      "iv_name": dev.iv_name,
      "dev_type": dev.dev_type,
      "logical_id": dev.logical_id,
      "physical_id": dev.physical_id,
      "pstatus": dev_pstatus,
      "sstatus": dev_sstatus,
      "children": dev_children,
      }

    return data

  def Exec(self, feedback_fn):
    """Gather and return data"""
    result = {}
    for instance in self.wanted_instances:
      remote_info = rpc.call_instance_info(instance.primary_node,
                                           instance.name)
      # the hypervisor reports the instance only when it is running
      if remote_info and "state" in remote_info:
        remote_state = "up"
      else:
        remote_state = "down"
      if instance.status == "down":
        config_state = "down"
      else:
        config_state = "up"

      disks = [self._ComputeDiskStatus(instance, None, device)
               for device in instance.disks]

      idict = {
        "name": instance.name,
        "config_state": config_state,
        "run_state": remote_state,
        "pnode": instance.primary_node,
        "snodes": instance.secondary_nodes,
        "os": instance.os,
        "memory": instance.memory,
        "nics": [(nic.mac, nic.ip, nic.bridge) for nic in instance.nics],
        "disks": disks,
        "vcpus": instance.vcpus,
        }

      # hypervisor-specific fields
      htkind = self.sstore.GetHypervisorType()
      if htkind == constants.HT_XEN_PVM30:
        idict["kernel_path"] = instance.kernel_path
        idict["initrd_path"] = instance.initrd_path

      if htkind == constants.HT_XEN_HVM31:
        idict["hvm_boot_order"] = instance.hvm_boot_order
        idict["hvm_acpi"] = instance.hvm_acpi
        idict["hvm_pae"] = instance.hvm_pae
        idict["hvm_cdrom_image_path"] = instance.hvm_cdrom_image_path
        idict["hvm_nic_type"] = instance.hvm_nic_type
        idict["hvm_disk_type"] = instance.hvm_disk_type

      if htkind in constants.HTS_REQ_PORT:
        if instance.vnc_bind_address is None:
          vnc_bind_address = constants.VNC_DEFAULT_BIND_ADDRESS
        else:
          vnc_bind_address = instance.vnc_bind_address
        # build a human-readable console location depending on whether
        # the VNC server listens globally, locally, or on a given IP
        if instance.network_port is None:
          vnc_console_port = None
        elif vnc_bind_address == constants.BIND_ADDRESS_GLOBAL:
          vnc_console_port = "%s:%s" % (instance.primary_node,
                                       instance.network_port)
        elif vnc_bind_address == constants.LOCALHOST_IP_ADDRESS:
          vnc_console_port = "%s:%s on node %s" % (vnc_bind_address,
                                                   instance.network_port,
                                                   instance.primary_node)
        else:
          vnc_console_port = "%s:%s" % (instance.vnc_bind_address,
                                        instance.network_port)
        idict["vnc_console_port"] = vnc_console_port
        idict["vnc_bind_address"] = vnc_bind_address
        idict["network_port"] = instance.network_port

      result[instance.name] = idict

    return result
4364 a8083063 Iustin Pop
4365 a8083063 Iustin Pop
4366 7767bbf5 Manuel Franceschini
class LUSetInstanceParams(LogicalUnit):
4367 a8083063 Iustin Pop
  """Modifies an instances's parameters.
4368 a8083063 Iustin Pop

4369 a8083063 Iustin Pop
  """
4370 a8083063 Iustin Pop
  HPATH = "instance-modify"
4371 a8083063 Iustin Pop
  HTYPE = constants.HTYPE_INSTANCE
4372 a8083063 Iustin Pop
  _OP_REQP = ["instance_name"]
4373 1a5c7281 Guido Trotter
  REQ_BGL = False
4374 1a5c7281 Guido Trotter
4375 1a5c7281 Guido Trotter
  def ExpandNames(self):
    # Resolve the instance name given in the opcode and acquire its
    # lock; no node-level locks are declared for this operation.
    self._ExpandAndLockInstance()
4377 a8083063 Iustin Pop
4378 a8083063 Iustin Pop
  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on the master, primary and secondaries.

    """
    args = {}
    if self.mem:
      args['memory'] = self.mem
    if self.vcpus:
      args['vcpus'] = self.vcpus
    # only override the NIC data in the hook environment when at least
    # one NIC parameter is actually being changed
    if self.do_ip or self.do_bridge or self.mac:
      nic0 = self.instance.nics[0]
      if self.do_ip:
        ip = self.ip
      else:
        ip = nic0.ip
      # fall back to the current values for unchanged fields
      bridge = self.bridge or nic0.bridge
      mac = self.mac or nic0.mac
      args['nics'] = [(ip, bridge, mac)]
    env = _BuildInstanceHookEnvByObject(self.instance, override=args)
    run_nodes = ([self.sstore.GetMasterNode(), self.instance.primary_node] +
                 list(self.instance.secondary_nodes))
    return env, run_nodes, run_nodes
4407 a8083063 Iustin Pop
4408 a8083063 Iustin Pop
  def CheckPrereq(self):
    """Check prerequisites.

    This only checks the instance list against the existing names.

    """
    # FIXME: all the parameters could be checked before, in ExpandNames, or in
    # a separate CheckArguments function, if we implement one, so the operation
    # can be aborted without waiting for any lock, should it have an error...
    # pull the optional parameters off the opcode; missing ones become None
    self.mem = getattr(self.op, "mem", None)
    self.vcpus = getattr(self.op, "vcpus", None)
    self.ip = getattr(self.op, "ip", None)
    self.mac = getattr(self.op, "mac", None)
    self.bridge = getattr(self.op, "bridge", None)
    self.kernel_path = getattr(self.op, "kernel_path", None)
    self.initrd_path = getattr(self.op, "initrd_path", None)
    self.hvm_boot_order = getattr(self.op, "hvm_boot_order", None)
    self.hvm_acpi = getattr(self.op, "hvm_acpi", None)
    self.hvm_pae = getattr(self.op, "hvm_pae", None)
    self.hvm_nic_type = getattr(self.op, "hvm_nic_type", None)
    self.hvm_disk_type = getattr(self.op, "hvm_disk_type", None)
    self.hvm_cdrom_image_path = getattr(self.op, "hvm_cdrom_image_path", None)
    self.vnc_bind_address = getattr(self.op, "vnc_bind_address", None)
    self.force = getattr(self.op, "force", None)
    # at least one modifiable parameter must have been supplied
    # (note: 'force' is deliberately not part of this list)
    all_parms = [self.mem, self.vcpus, self.ip, self.bridge, self.mac,
                 self.kernel_path, self.initrd_path, self.hvm_boot_order,
                 self.hvm_acpi, self.hvm_pae, self.hvm_cdrom_image_path,
                 self.vnc_bind_address, self.hvm_nic_type, self.hvm_disk_type]
    if all_parms.count(None) == len(all_parms):
      raise errors.OpPrereqError("No changes submitted")
    # coerce mem/vcpus to integers early so later arithmetic is safe
    if self.mem is not None:
      try:
        self.mem = int(self.mem)
      except ValueError, err:
        raise errors.OpPrereqError("Invalid memory size: %s" % str(err))
    if self.vcpus is not None:
      try:
        self.vcpus = int(self.vcpus)
      except ValueError, err:
        raise errors.OpPrereqError("Invalid vcpus number: %s" % str(err))
    if self.ip is not None:
      self.do_ip = True
      # the literal string "none" removes the IP from the NIC
      if self.ip.lower() == "none":
        self.ip = None
      else:
        if not utils.IsValidIP(self.ip):
          raise errors.OpPrereqError("Invalid IP address '%s'." % self.ip)
    else:
      self.do_ip = False
    self.do_bridge = (self.bridge is not None)
    if self.mac is not None:
      # reject MACs already assigned to another instance in the cluster
      if self.cfg.IsMacInUse(self.mac):
        raise errors.OpPrereqError('MAC address %s already in use in cluster' %
                                   self.mac)
      if not utils.IsValidMac(self.mac):
        raise errors.OpPrereqError('Invalid MAC address %s' % self.mac)

    if self.kernel_path is not None:
      self.do_kernel_path = True
      if self.kernel_path == constants.VALUE_NONE:
        raise errors.OpPrereqError("Can't set instance to no kernel")

      if self.kernel_path != constants.VALUE_DEFAULT:
        if not os.path.isabs(self.kernel_path):
          raise errors.OpPrereqError("The kernel path must be an absolute"
                                    " filename")
    else:
      self.do_kernel_path = False

    if self.initrd_path is not None:
      self.do_initrd_path = True
      # unlike the kernel, the initrd may be removed ("none") or reset
      # to the cluster default
      if self.initrd_path not in (constants.VALUE_NONE,
                                  constants.VALUE_DEFAULT):
        if not os.path.isabs(self.initrd_path):
          raise errors.OpPrereqError("The initrd path must be an absolute"
                                    " filename")
    else:
      self.do_initrd_path = False

    # boot order verification
    if self.hvm_boot_order is not None:
      if self.hvm_boot_order != constants.VALUE_DEFAULT:
        # the boot order string may only contain the letters a, c, d, n
        if len(self.hvm_boot_order.strip("acdn")) != 0:
          raise errors.OpPrereqError("invalid boot order specified,"
                                     " must be one or more of [acdn]"
                                     " or 'default'")

    # hvm_cdrom_image_path verification
    # NOTE(review): os.path.isfile runs on the master node; presumably the
    # image path is expected to be valid there — confirm against callers
    if self.op.hvm_cdrom_image_path is not None:
      if not (os.path.isabs(self.op.hvm_cdrom_image_path) or
              self.op.hvm_cdrom_image_path.lower() == "none"):
        raise errors.OpPrereqError("The path to the HVM CDROM image must"
                                   " be an absolute path or None, not %s" %
                                   self.op.hvm_cdrom_image_path)
      if not (os.path.isfile(self.op.hvm_cdrom_image_path) or
              self.op.hvm_cdrom_image_path.lower() == "none"):
        raise errors.OpPrereqError("The HVM CDROM image must either be a"
                                   " regular file or a symlink pointing to"
                                   " an existing regular file, not %s" %
                                   self.op.hvm_cdrom_image_path)

    # vnc_bind_address verification
    if self.op.vnc_bind_address is not None:
      if not utils.IsValidIP(self.op.vnc_bind_address):
        raise errors.OpPrereqError("given VNC bind address '%s' doesn't look"
                                   " like a valid IP address" %
                                   self.op.vnc_bind_address)

    instance = self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
    assert self.instance is not None, \
      "Cannot retrieve locked instance %s" % self.op.instance_name
    # non-fatal problems are collected here and reported by Exec
    self.warn = []
    # memory changes are checked against node free memory, unless forced
    if self.mem is not None and not self.force:
      pnode = self.instance.primary_node
      nodelist = [pnode]
      nodelist.extend(instance.secondary_nodes)
      instance_info = rpc.call_instance_info(pnode, instance.name)
      nodeinfo = rpc.call_node_info(nodelist, self.cfg.GetVGName())

      if pnode not in nodeinfo or not isinstance(nodeinfo[pnode], dict):
        # Assume the primary node is unreachable and go ahead
        self.warn.append("Can't get info from primary node %s" % pnode)
      else:
        if instance_info:
          current_mem = instance_info['memory']
        else:
          # Assume instance not running
          # (there is a slight race condition here, but it's not very probable,
          # and we have no other way to check)
          current_mem = 0
        # the extra memory needed is the delta minus what the node has free
        miss_mem = self.mem - current_mem - nodeinfo[pnode]['memory_free']
        if miss_mem > 0:
          raise errors.OpPrereqError("This change will prevent the instance"
                                     " from starting, due to %d MB of memory"
                                     " missing on its primary node" % miss_mem)

      # secondary-node shortfalls only produce warnings (failover would fail)
      for node in instance.secondary_nodes:
        if node not in nodeinfo or not isinstance(nodeinfo[node], dict):
          self.warn.append("Can't get info from secondary node %s" % node)
        elif self.mem > nodeinfo[node]['memory_free']:
          self.warn.append("Not enough memory to failover instance to secondary"
                           " node %s" % node)

    # Xen HVM device type checks
    if self.sstore.GetHypervisorType() == constants.HT_XEN_HVM31:
      if self.op.hvm_nic_type is not None:
        if self.op.hvm_nic_type not in constants.HT_HVM_VALID_NIC_TYPES:
          raise errors.OpPrereqError("Invalid NIC type %s specified for Xen"
                                     " HVM  hypervisor" % self.op.hvm_nic_type)
      if self.op.hvm_disk_type is not None:
        if self.op.hvm_disk_type not in constants.HT_HVM_VALID_DISK_TYPES:
          raise errors.OpPrereqError("Invalid disk type %s specified for Xen"
                                     " HVM hypervisor" % self.op.hvm_disk_type)

    return
4563 a8083063 Iustin Pop
4564 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
    """Modifies an instance.

    All parameters take effect only at the next restart of the instance.

    Returns a list of (parameter_name, new_value) pairs, one per
    parameter that was actually changed.
    """
    # Process here the warnings from CheckPrereq, as we don't have a
    # feedback_fn there.
    for warn in self.warn:
      feedback_fn("WARNING: %s" % warn)

    result = []
    instance = self.instance
    # NOTE: mem/vcpus/bridge/mac use plain truthiness ("unset or falsy means
    # no change"), while the hvm_* flags below compare against None so that
    # an explicit False value can still be applied.
    if self.mem:
      instance.memory = self.mem
      result.append(("mem", self.mem))
    if self.vcpus:
      instance.vcpus = self.vcpus
      result.append(("vcpus",  self.vcpus))
    # do_ip/do_kernel_path/do_initrd_path are separate flags because the
    # corresponding values may legitimately be None/empty.
    if self.do_ip:
      instance.nics[0].ip = self.ip
      result.append(("ip", self.ip))
    if self.bridge:
      instance.nics[0].bridge = self.bridge
      result.append(("bridge", self.bridge))
    if self.mac:
      instance.nics[0].mac = self.mac
      result.append(("mac", self.mac))
    if self.do_kernel_path:
      instance.kernel_path = self.kernel_path
      result.append(("kernel_path", self.kernel_path))
    if self.do_initrd_path:
      instance.initrd_path = self.initrd_path
      result.append(("initrd_path", self.initrd_path))
    if self.hvm_boot_order:
      # the special "default" value resets the boot order to unset (None)
      if self.hvm_boot_order == constants.VALUE_DEFAULT:
        instance.hvm_boot_order = None
      else:
        instance.hvm_boot_order = self.hvm_boot_order
      result.append(("hvm_boot_order", self.hvm_boot_order))
    if self.hvm_acpi is not None:
      instance.hvm_acpi = self.hvm_acpi
      result.append(("hvm_acpi", self.hvm_acpi))
    if self.hvm_pae is not None:
      instance.hvm_pae = self.hvm_pae
      result.append(("hvm_pae", self.hvm_pae))
    if self.hvm_nic_type is not None:
      instance.hvm_nic_type = self.hvm_nic_type
      result.append(("hvm_nic_type", self.hvm_nic_type))
    if self.hvm_disk_type is not None:
      instance.hvm_disk_type = self.hvm_disk_type
      result.append(("hvm_disk_type", self.hvm_disk_type))
    if self.hvm_cdrom_image_path:
      # the special "none" value removes the CD-ROM image
      if self.hvm_cdrom_image_path == constants.VALUE_NONE:
        instance.hvm_cdrom_image_path = None
      else:
        instance.hvm_cdrom_image_path = self.hvm_cdrom_image_path
      result.append(("hvm_cdrom_image_path", self.hvm_cdrom_image_path))
    if self.vnc_bind_address:
      instance.vnc_bind_address = self.vnc_bind_address
      result.append(("vnc_bind_address", self.vnc_bind_address))

    # persist the modified instance object in the cluster configuration
    self.cfg.Update(instance)

    return result
4628 a8083063 Iustin Pop
4629 a8083063 Iustin Pop
4630 a8083063 Iustin Pop
class LUQueryExports(NoHooksLU):
  """Query the per-node list of instance exports.

  """
  _OP_REQP = ['nodes']
  REQ_BGL = False

  def ExpandNames(self):
    self.needed_locks = {}
    self.share_locks[locking.LEVEL_NODE] = 1
    if self.op.nodes:
      self.needed_locks[locking.LEVEL_NODE] = \
        _GetWantedNodes(self, self.op.nodes)
    else:
      # an empty node list means "query all nodes"
      self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET

  def CheckPrereq(self):
    """Check prerequisites.

    """
    # the node locks acquired in ExpandNames are the nodes to query
    self.nodes = self.acquired_locks[locking.LEVEL_NODE]

  def Exec(self, feedback_fn):
    """Compute the list of all the exported system images.

    Returns:
      a dictionary with the structure node->(export-list)
      where export-list is a list of the instances exported on
      that node.

    """
    return rpc.call_export_list(self.nodes)
4662 a8083063 Iustin Pop
4663 a8083063 Iustin Pop
4664 a8083063 Iustin Pop
class LUExportInstance(LogicalUnit):
  """Export an instance to an image in the cluster.

  """
  HPATH = "instance-export"
  HTYPE = constants.HTYPE_INSTANCE
  _OP_REQP = ["instance_name", "target_node", "shutdown"]
  REQ_BGL = False

  def ExpandNames(self):
    self._ExpandAndLockInstance()
    # FIXME: lock only instance primary and destination node
    #
    # Sad but true, for now we have to lock all nodes, as we don't know where
    # the previous export might be, and in this LU we search for it and
    # remove it from its current node. In the future we could fix this by:
    #  - making a tasklet to search (share-lock all), then create the new one,
    #    then one to remove, after
    #  - removing the removal operation altogether
    self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET

  def DeclareLocks(self, level):
    """Last minute lock declaration."""
    # All nodes are locked anyway, so nothing to do here.

  def BuildHooksEnv(self):
    """Build hooks env.

    This will run on the master, primary node and target node.

    """
    env = {
      "EXPORT_NODE": self.op.target_node,
      "EXPORT_DO_SHUTDOWN": self.op.shutdown,
      }
    env.update(_BuildInstanceHookEnvByObject(self.instance))
    # hooks run on master, source (primary) node and destination node
    nl = [self.sstore.GetMasterNode(), self.instance.primary_node,
          self.op.target_node]
    return env, nl, nl

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance and node names are valid.

    """
    instance_name = self.op.instance_name
    self.instance = self.cfg.GetInstanceInfo(instance_name)
    assert self.instance is not None, \
          "Cannot retrieve locked instance %s" % self.op.instance_name

    self.dst_node = self.cfg.GetNodeInfo(
      self.cfg.ExpandNodeName(self.op.target_node))

    assert self.dst_node is not None, \
          "Cannot retrieve locked node %s" % self.op.target_node

    # instance disk type verification: file-based disks cannot be
    # LVM-snapshotted, so exporting them is not supported
    for disk in self.instance.disks:
      if disk.dev_type == constants.LD_FILE:
        raise errors.OpPrereqError("Export not supported for instances with"
                                   " file-based disks")

  def Exec(self, feedback_fn):
    """Export an instance to an image in the cluster.

    The flow is: optionally shut the instance down, snapshot its "sda"
    disk, restart it, copy the snapshot to the destination node, then
    finalize the export there and remove any older export of the same
    instance from other nodes.

    """
    instance = self.instance
    dst_node = self.dst_node
    src_node = instance.primary_node
    if self.op.shutdown:
      # shutdown the instance, but not the disks
      if not rpc.call_instance_shutdown(src_node, instance):
        raise errors.OpExecError("Could not shutdown instance %s on node %s" %
                                 (instance.name, src_node))

    vgname = self.cfg.GetVGName()

    # disk snapshots created so far; failures are logged, not raised
    snap_disks = []

    try:
      # NOTE(review): only the "sda" disk is snapshotted/exported here;
      # presumably the export format covers the system disk only — confirm
      for disk in instance.disks:
        if disk.iv_name == "sda":
          # new_dev_name will be a snapshot of an lvm leaf of the one we passed
          new_dev_name = rpc.call_blockdev_snapshot(src_node, disk)

          if not new_dev_name:
            logger.Error("could not snapshot block device %s on node %s" %
                         (disk.logical_id[1], src_node))
          else:
            new_dev = objects.Disk(dev_type=constants.LD_LV, size=disk.size,
                                      logical_id=(vgname, new_dev_name),
                                      physical_id=(vgname, new_dev_name),
                                      iv_name=disk.iv_name)
            snap_disks.append(new_dev)

    finally:
      # restart the instance even if snapshotting failed, but only if we
      # shut it down ourselves and it was marked as running
      if self.op.shutdown and instance.status == "up":
        if not rpc.call_instance_start(src_node, instance, None):
          _ShutdownInstanceDisks(instance, self.cfg)
          raise errors.OpExecError("Could not start instance")

    # TODO: check for size

    # copy each snapshot to the destination, then drop the snapshot on the
    # source node regardless of whether the copy succeeded
    for dev in snap_disks:
      if not rpc.call_snapshot_export(src_node, dev, dst_node.name, instance):
        logger.Error("could not export block device %s from node %s to node %s"
                     % (dev.logical_id[1], src_node, dst_node.name))
      if not rpc.call_blockdev_remove(src_node, dev):
        logger.Error("could not remove snapshot block device %s from node %s" %
                     (dev.logical_id[1], src_node))

    if not rpc.call_finalize_export(dst_node.name, instance, snap_disks):
      logger.Error("could not finalize export for instance %s on node %s" %
                   (instance.name, dst_node.name))

    nodelist = self.cfg.GetNodeList()
    nodelist.remove(dst_node.name)

    # on one-node clusters nodelist will be empty after the removal
    # if we proceed the backup would be removed because OpQueryExports
    # substitutes an empty list with the full cluster node list.
    if nodelist:
      exportlist = rpc.call_export_list(nodelist)
      for node in exportlist:
        if instance.name in exportlist[node]:
          if not rpc.call_export_remove(node, instance.name):
            logger.Error("could not remove older export for instance %s"
                         " on node %s" % (instance.name, node))
4793 5c947f38 Iustin Pop
4794 5c947f38 Iustin Pop
4795 9ac99fda Guido Trotter
class LURemoveExport(NoHooksLU):
  """Remove exports related to the named instance.

  """
  _OP_REQP = ["instance_name"]
  REQ_BGL = False

  def ExpandNames(self):
    # All nodes must be locked for RemoveExport to work everywhere; the
    # instance itself is not locked since it is never touched (and exports
    # belonging to already-removed instances can be cleaned up too).
    self.needed_locks = {locking.LEVEL_NODE: locking.ALL_SET}

  def CheckPrereq(self):
    """Check prerequisites.
    """
    pass

  def Exec(self, feedback_fn):
    """Remove any export.

    """
    fqdn_warn = False
    instance_name = self.cfg.ExpandInstanceName(self.op.instance_name)
    if not instance_name:
      # The instance is unknown to the config; fall back to the name as
      # given, which only works if it was a FQDN.
      fqdn_warn = True
      instance_name = self.op.instance_name

    locked_nodes = self.acquired_locks[locking.LEVEL_NODE]
    exportlist = rpc.call_export_list(locked_nodes)
    found = False
    for node in exportlist:
      if instance_name not in exportlist[node]:
        continue
      found = True
      if not rpc.call_export_remove(node, instance_name):
        logger.Error("could not remove export for instance %s"
                     " on node %s" % (instance_name, node))

    if fqdn_warn and not found:
      feedback_fn("Export not found. If trying to remove an export belonging"
                  " to a deleted instance please use its Fully Qualified"
                  " Domain Name.")
4839 9ac99fda Guido Trotter
4840 9ac99fda Guido Trotter
4841 5c947f38 Iustin Pop
class TagsLU(NoHooksLU):
  """Generic tags LU.

  This is an abstract class which is the parent of all the other tags LUs.

  """

  def ExpandNames(self):
    self.needed_locks = {}
    if self.op.kind == constants.TAG_NODE:
      expanded = self.cfg.ExpandNodeName(self.op.name)
      if expanded is None:
        raise errors.OpPrereqError("Invalid node name (%s)" %
                                   (self.op.name,))
      self.op.name = expanded
      self.needed_locks[locking.LEVEL_NODE] = expanded
    elif self.op.kind == constants.TAG_INSTANCE:
      expanded = self.cfg.ExpandInstanceName(self.op.name)
      if expanded is None:
        raise errors.OpPrereqError("Invalid instance name (%s)" %
                                   (self.op.name,))
      self.op.name = expanded
      self.needed_locks[locking.LEVEL_INSTANCE] = expanded
    # cluster-level tags need no locks beyond the (empty) default

  def CheckPrereq(self):
    """Check prerequisites.

    """
    # resolve the tag target object from the (already expanded) name
    kind = self.op.kind
    if kind == constants.TAG_CLUSTER:
      self.target = self.cfg.GetClusterInfo()
    elif kind == constants.TAG_NODE:
      self.target = self.cfg.GetNodeInfo(self.op.name)
    elif kind == constants.TAG_INSTANCE:
      self.target = self.cfg.GetInstanceInfo(self.op.name)
    else:
      raise errors.OpPrereqError("Wrong tag type requested (%s)" %
                                 str(kind))
4878 5c947f38 Iustin Pop
4879 5c947f38 Iustin Pop
4880 5c947f38 Iustin Pop
class LUGetTags(TagsLU):
  """Returns the tags of a given object.

  """
  _OP_REQP = ["kind", "name"]
  REQ_BGL = False

  def Exec(self, feedback_fn):
    """Returns the tag list.

    """
    # self.target was resolved by TagsLU.CheckPrereq
    tags = self.target.GetTags()
    return list(tags)
4892 5c947f38 Iustin Pop
4893 5c947f38 Iustin Pop
4894 73415719 Iustin Pop
class LUSearchTags(NoHooksLU):
4895 73415719 Iustin Pop
  """Searches the tags for a given pattern.
4896 73415719 Iustin Pop

4897 73415719 Iustin Pop
  """
4898 73415719 Iustin Pop
  _OP_REQP = ["pattern"]
4899 8646adce Guido Trotter
  REQ_BGL = False
4900 8646adce Guido Trotter
4901 8646adce Guido Trotter
  def ExpandNames(self):
4902 8646adce Guido Trotter
    self.needed_locks = {}
4903 73415719 Iustin Pop
4904 73415719 Iustin Pop
  def CheckPrereq(self):
4905 73415719 Iustin Pop
    """Check prerequisites.
4906 73415719 Iustin Pop

4907 73415719 Iustin Pop
    This checks the pattern passed for validity by compiling it.
4908 73415719 Iustin Pop

4909 73415719 Iustin Pop
    """
4910 73415719 Iustin Pop
    try:
4911 73415719 Iustin Pop
      self.re = re.compile(self.op.pattern)
4912 73415719 Iustin Pop
    except re.error, err:
4913 73415719 Iustin Pop
      raise errors.OpPrereqError("Invalid search pattern '%s': %s" %
4914 73415719 Iustin Pop
                                 (self.op.pattern, err))
4915 73415719 Iustin Pop
4916 73415719 Iustin Pop
  def Exec(self, feedback_fn):
4917 73415719 Iustin Pop
    """Returns the tag list.
4918 73415719 Iustin Pop

4919 73415719 Iustin Pop
    """
4920 73415719 Iustin Pop
    cfg = self.cfg
4921 73415719 Iustin Pop
    tgts = [("/cluster", cfg.GetClusterInfo())]
4922 8646adce Guido Trotter
    ilist = cfg.GetAllInstancesInfo().values()
4923 73415719 Iustin Pop
    tgts.extend([("/instances/%s" % i.name, i) for i in ilist])
4924 8646adce Guido Trotter
    nlist = cfg.GetAllNodesInfo().values()
4925 73415719 Iustin Pop
    tgts.extend([("/nodes/%s" % n.name, n) for n in nlist])
4926 73415719 Iustin Pop
    results = []
4927 73415719 Iustin Pop
    for path, target in tgts:
4928 73415719 Iustin Pop
      for tag in target.GetTags():
4929 73415719 Iustin Pop
        if self.re.search(tag):
4930 73415719 Iustin Pop
          results.append((path, tag))
4931 73415719 Iustin Pop
    return results
4932 73415719 Iustin Pop
4933 73415719 Iustin Pop
4934 f27302fa Iustin Pop
class LUAddTags(TagsLU):
4935 5c947f38 Iustin Pop
  """Sets a tag on a given object.
4936 5c947f38 Iustin Pop

4937 5c947f38 Iustin Pop
  """
4938 f27302fa Iustin Pop
  _OP_REQP = ["kind", "name", "tags"]
4939 8646adce Guido Trotter
  REQ_BGL = False
4940 5c947f38 Iustin Pop
4941 5c947f38 Iustin Pop
  def CheckPrereq(self):
4942 5c947f38 Iustin Pop
    """Check prerequisites.
4943 5c947f38 Iustin Pop

4944 5c947f38 Iustin Pop
    This checks the type and length of the tag name and value.
4945 5c947f38 Iustin Pop

4946 5c947f38 Iustin Pop
    """
4947 5c947f38 Iustin Pop
    TagsLU.CheckPrereq(self)
4948 f27302fa Iustin Pop
    for tag in self.op.tags:
4949 f27302fa Iustin Pop
      objects.TaggableObject.ValidateTag(tag)
4950 5c947f38 Iustin Pop
4951 5c947f38 Iustin Pop
  def Exec(self, feedback_fn):
4952 5c947f38 Iustin Pop
    """Sets the tag.
4953 5c947f38 Iustin Pop

4954 5c947f38 Iustin Pop
    """
4955 5c947f38 Iustin Pop
    try:
4956 f27302fa Iustin Pop
      for tag in self.op.tags:
4957 f27302fa Iustin Pop
        self.target.AddTag(tag)
4958 5c947f38 Iustin Pop
    except errors.TagError, err:
4959 3ecf6786 Iustin Pop
      raise errors.OpExecError("Error while setting tag: %s" % str(err))
4960 5c947f38 Iustin Pop
    try:
4961 5c947f38 Iustin Pop
      self.cfg.Update(self.target)
4962 5c947f38 Iustin Pop
    except errors.ConfigurationError:
4963 3ecf6786 Iustin Pop
      raise errors.OpRetryError("There has been a modification to the"
4964 3ecf6786 Iustin Pop
                                " config file and the operation has been"
4965 3ecf6786 Iustin Pop
                                " aborted. Please retry.")
4966 5c947f38 Iustin Pop
4967 5c947f38 Iustin Pop
4968 f27302fa Iustin Pop
class LUDelTags(TagsLU):
  """Delete a list of tags from a given object.

  """
  _OP_REQP = ["kind", "name", "tags"]
  REQ_BGL = False

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that we have the given tag.

    """
    TagsLU.CheckPrereq(self)
    for tag in self.op.tags:
      objects.TaggableObject.ValidateTag(tag)
    # every requested tag must currently be present on the target
    missing = frozenset(self.op.tags) - self.target.GetTags()
    if missing:
      names = sorted(["'%s'" % tag for tag in missing])
      raise errors.OpPrereqError("Tag(s) %s not found" %
                                 (",".join(names)))

  def Exec(self, feedback_fn):
    """Remove the tag from the object.

    """
    for old_tag in self.op.tags:
      self.target.RemoveTag(old_tag)
    try:
      self.cfg.Update(self.target)
    except errors.ConfigurationError:
      raise errors.OpRetryError("There has been a modification to the"
                                " config file and the operation has been"
                                " aborted. Please retry.")
5005 06009e27 Iustin Pop
5006 0eed6e61 Guido Trotter
5007 06009e27 Iustin Pop
class LUTestDelay(NoHooksLU):
  """Sleep for a specified amount of time.

  This LU sleeps on the master and/or nodes for a specified amount of
  time.

  """
  _OP_REQP = ["duration", "on_master", "on_nodes"]
  REQ_BGL = False

  def ExpandNames(self):
    """Expand names and set required locks.

    This expands the node list, if any.

    """
    self.needed_locks = {}
    if self.op.on_nodes:
      # _GetWantedNodes can be used here, but is not always appropriate to use
      # this way in ExpandNames. Check LogicalUnit.ExpandNames docstring for
      # more information.
      self.op.on_nodes = _GetWantedNodes(self, self.op.on_nodes)
      self.needed_locks[locking.LEVEL_NODE] = self.op.on_nodes

  def CheckPrereq(self):
    """Check prerequisites.

    """

  def Exec(self, feedback_fn):
    """Do the actual sleep.

    """
    # the master sleeps locally, the nodes sleep via an rpc call
    if self.op.on_master and not utils.TestDelay(self.op.duration):
      raise errors.OpExecError("Error during master delay test")
    if self.op.on_nodes:
      node_results = rpc.call_test_delay(self.op.on_nodes, self.op.duration)
      if not node_results:
        raise errors.OpExecError("Complete failure from rpc call")
      for node_name, status in node_results.items():
        if not status:
          raise errors.OpExecError("Failure during rpc call to node %s,"
                                   " result: %s" % (node_name, status))
5051 d61df03e Iustin Pop
5052 d61df03e Iustin Pop
5053 d1c2dd75 Iustin Pop
class IAllocator(object):
  """IAllocator framework.

  An IAllocator instance has three sets of attributes:
    - cfg/sstore that are needed to query the cluster
    - input data (all members of the _KEYS class attribute are required)
    - four buffer attributes (in|out_data|text), that represent the
      input (to the external script) in text and data structure format,
      and the output from it, again in two formats
    - the result variables from the script (success, info, nodes) for
      easy usage

  """
  # keyword arguments required when mode is IALLOCATOR_MODE_ALLOC
  _ALLO_KEYS = [
    "mem_size", "disks", "disk_template",
    "os", "tags", "nics", "vcpus",
    ]
  # keyword arguments required when mode is IALLOCATOR_MODE_RELOC
  _RELO_KEYS = [
    "relocate_from",
    ]

  def __init__(self, cfg, sstore, mode, name, **kwargs):
    """Initialize the allocator wrapper and build the input data.

    The set of required/allowed keyword arguments depends on ``mode``:
    exactly the keys in _ALLO_KEYS for allocation, _RELO_KEYS for
    relocation.  Any other combination raises ProgrammerError.

    """
    self.cfg = cfg
    self.sstore = sstore
    # init buffer variables
    self.in_text = self.out_text = self.in_data = self.out_data = None
    # init all input fields so that pylint is happy
    self.mode = mode
    self.name = name
    self.mem_size = self.disks = self.disk_template = None
    self.os = self.tags = self.nics = self.vcpus = None
    self.relocate_from = None
    # computed fields
    self.required_nodes = None
    # init result fields
    self.success = self.info = self.nodes = None
    if self.mode == constants.IALLOCATOR_MODE_ALLOC:
      keyset = self._ALLO_KEYS
    elif self.mode == constants.IALLOCATOR_MODE_RELOC:
      keyset = self._RELO_KEYS
    else:
      raise errors.ProgrammerError("Unknown mode '%s' passed to the"
                                   " IAllocator" % self.mode)
    # every passed kwarg must be in the mode's keyset, and every key in
    # the keyset must have been passed (exact match, both directions)
    for key in kwargs:
      if key not in keyset:
        raise errors.ProgrammerError("Invalid input parameter '%s' to"
                                     " IAllocator" % key)
      setattr(self, key, kwargs[key])
    for key in keyset:
      if key not in kwargs:
        raise errors.ProgrammerError("Missing input parameter '%s' to"
                                     " IAllocator" % key)
    # eagerly build in_data/in_text so the instance is ready to Run()
    self._BuildInputData()

  def _ComputeClusterData(self):
    """Compute the generic allocator input data.

    This is the data that is independent of the actual operation.

    """
    cfg = self.cfg
    # cluster data
    data = {
      "version": 1,
      "cluster_name": self.sstore.GetClusterName(),
      "cluster_tags": list(cfg.GetClusterInfo().GetTags()),
      "hypervisor_type": self.sstore.GetHypervisorType(),
      # we don't have job IDs
      }

    i_list = [cfg.GetInstanceInfo(iname) for iname in cfg.GetInstanceList()]

    # node data
    node_results = {}
    node_list = cfg.GetNodeList()
    node_data = rpc.call_node_info(node_list, cfg.GetVGName())
    for nname in node_list:
      ninfo = cfg.GetNodeInfo(nname)
      if nname not in node_data or not isinstance(node_data[nname], dict):
        raise errors.OpExecError("Can't get data for node %s" % nname)
      remote_info = node_data[nname]
      # validate the rpc reply and normalize the values to ints in place
      for attr in ['memory_total', 'memory_free', 'memory_dom0',
                   'vg_size', 'vg_free', 'cpu_total']:
        if attr not in remote_info:
          raise errors.OpExecError("Node '%s' didn't return attribute '%s'" %
                                   (nname, attr))
        try:
          remote_info[attr] = int(remote_info[attr])
        except ValueError, err:
          raise errors.OpExecError("Node '%s' returned invalid value for '%s':"
                                   " %s" % (nname, attr, str(err)))
      # compute memory used by primary instances
      i_p_mem = i_p_up_mem = 0
      for iinfo in i_list:
        if iinfo.primary_node == nname:
          i_p_mem += iinfo.memory
          if iinfo.status == "up":
            i_p_up_mem += iinfo.memory

      # compute memory used by instances
      pnr = {
        "tags": list(ninfo.GetTags()),
        "total_memory": remote_info['memory_total'],
        "reserved_memory": remote_info['memory_dom0'],
        "free_memory": remote_info['memory_free'],
        "i_pri_memory": i_p_mem,
        "i_pri_up_memory": i_p_up_mem,
        "total_disk": remote_info['vg_size'],
        "free_disk": remote_info['vg_free'],
        "primary_ip": ninfo.primary_ip,
        "secondary_ip": ninfo.secondary_ip,
        "total_cpus": remote_info['cpu_total'],
        }
      node_results[nname] = pnr
    data["nodes"] = node_results

    # instance data
    instance_data = {}
    for iinfo in i_list:
      nic_data = [{"mac": n.mac, "ip": n.ip, "bridge": n.bridge}
                  for n in iinfo.nics]
      pir = {
        "tags": list(iinfo.GetTags()),
        "should_run": iinfo.status == "up",
        "vcpus": iinfo.vcpus,
        "memory": iinfo.memory,
        "os": iinfo.os,
        "nodes": [iinfo.primary_node] + list(iinfo.secondary_nodes),
        "nics": nic_data,
        "disks": [{"size": dsk.size, "mode": "w"} for dsk in iinfo.disks],
        "disk_template": iinfo.disk_template,
        }
      instance_data[iinfo.name] = pir

    data["instances"] = instance_data

    self.in_data = data

  def _AddNewInstance(self):
    """Add new instance data to allocator structure.

    This in combination with _AllocatorGetClusterData will create the
    correct structure needed as input for the allocator.

    The checks for the completeness of the opcode must have already been
    done.

    """
    data = self.in_data
    # only two-disk layouts are supported by _ComputeDiskSize below
    if len(self.disks) != 2:
      raise errors.OpExecError("Only two-disk configurations supported")

    disk_space = _ComputeDiskSize(self.disk_template,
                                  self.disks[0]["size"], self.disks[1]["size"])

    # network-mirrored templates need a primary and a secondary node
    if self.disk_template in constants.DTS_NET_MIRROR:
      self.required_nodes = 2
    else:
      self.required_nodes = 1
    request = {
      "type": "allocate",
      "name": self.name,
      "disk_template": self.disk_template,
      "tags": self.tags,
      "os": self.os,
      "vcpus": self.vcpus,
      "memory": self.mem_size,
      "disks": self.disks,
      "disk_space_total": disk_space,
      "nics": self.nics,
      "required_nodes": self.required_nodes,
      }
    data["request"] = request

  def _AddRelocateInstance(self):
    """Add relocate instance data to allocator structure.

    This in combination with _IAllocatorGetClusterData will create the
    correct structure needed as input for the allocator.

    The checks for the completeness of the opcode must have already been
    done.

    """
    instance = self.cfg.GetInstanceInfo(self.name)
    if instance is None:
      raise errors.ProgrammerError("Unknown instance '%s' passed to"
                                   " IAllocator" % self.name)

    # relocation only makes sense for instances with a secondary node
    if instance.disk_template not in constants.DTS_NET_MIRROR:
      raise errors.OpPrereqError("Can't relocate non-mirrored instances")

    if len(instance.secondary_nodes) != 1:
      raise errors.OpPrereqError("Instance has not exactly one secondary node")

    # only the (single) secondary is replaced, so one new node is needed
    self.required_nodes = 1

    disk_space = _ComputeDiskSize(instance.disk_template,
                                  instance.disks[0].size,
                                  instance.disks[1].size)

    request = {
      "type": "relocate",
      "name": self.name,
      "disk_space_total": disk_space,
      "required_nodes": self.required_nodes,
      "relocate_from": self.relocate_from,
      }
    self.in_data["request"] = request

  def _BuildInputData(self):
    """Build input data structures.

    Fills self.in_data (cluster data plus the mode-specific request)
    and serializes it into self.in_text.

    """
    self._ComputeClusterData()

    if self.mode == constants.IALLOCATOR_MODE_ALLOC:
      self._AddNewInstance()
    else:
      self._AddRelocateInstance()

    self.in_text = serializer.Dump(self.in_data)

  def Run(self, name, validate=True, call_fn=rpc.call_iallocator_runner):
    """Run an instance allocator and return the results.

    The allocator ``name`` is executed on the master node via
    ``call_fn``; its stdout is stored in self.out_text and, if
    ``validate`` is true, parsed by _ValidateResult.

    """
    # NOTE: 'data' is unused; the rpc call below uses self.in_text directly
    data = self.in_text

    result = call_fn(self.sstore.GetMasterNode(), name, self.in_text)

    # expected reply shape: (rcode, stdout, stderr, fail)
    if not isinstance(result, (list, tuple)) or len(result) != 4:
      raise errors.OpExecError("Invalid result from master iallocator runner")

    rcode, stdout, stderr, fail = result

    if rcode == constants.IARUN_NOTFOUND:
      raise errors.OpExecError("Can't find allocator '%s'" % name)
    elif rcode == constants.IARUN_FAILURE:
      raise errors.OpExecError("Instance allocator call failed: %s,"
                               " output: %s" % (fail, stdout+stderr))
    self.out_text = stdout
    if validate:
      self._ValidateResult()

  def _ValidateResult(self):
    """Process the allocator results.

    This will process and if successful save the result in
    self.out_data and the other parameters.

    """
    try:
      rdict = serializer.Load(self.out_text)
    except Exception, err:
      raise errors.OpExecError("Can't parse iallocator results: %s" % str(err))

    if not isinstance(rdict, dict):
      raise errors.OpExecError("Can't parse iallocator results: not a dict")

    # the three mandatory reply keys become instance attributes
    for key in "success", "info", "nodes":
      if key not in rdict:
        raise errors.OpExecError("Can't parse iallocator results:"
                                 " missing key '%s'" % key)
      setattr(self, key, rdict[key])

    if not isinstance(rdict["nodes"], list):
      raise errors.OpExecError("Can't parse iallocator results: 'nodes' key"
                               " is not a list")
    self.out_data = rdict
5323 538475ca Iustin Pop
5324 538475ca Iustin Pop
5325 d61df03e Iustin Pop
class LUTestAllocator(NoHooksLU):
  """Run allocator tests.

  This LU runs the allocator tests

  """
  _OP_REQP = ["direction", "mode", "name"]

  def CheckPrereq(self):
    """Check prerequisites.

    This checks the opcode parameters depending on the director and mode test.

    """
    op = self.op
    if op.mode == constants.IALLOCATOR_MODE_ALLOC:
      # an allocation test needs the full instance specification
      required = ["name", "mem_size", "disks", "disk_template",
                  "os", "tags", "nics", "vcpus"]
      for attr in required:
        if not hasattr(op, attr):
          raise errors.OpPrereqError("Missing attribute '%s' on opcode input" %
                                     attr)
      iname = self.cfg.ExpandInstanceName(op.name)
      if iname is not None:
        raise errors.OpPrereqError("Instance '%s' already in the cluster" %
                                   iname)
      if not isinstance(op.nics, list):
        raise errors.OpPrereqError("Invalid parameter 'nics'")
      for nic in op.nics:
        if not (isinstance(nic, dict) and
                "mac" in nic and
                "ip" in nic and
                "bridge" in nic):
          raise errors.OpPrereqError("Invalid contents of the"
                                     " 'nics' parameter")
      if not isinstance(op.disks, list):
        raise errors.OpPrereqError("Invalid parameter 'disks'")
      if len(op.disks) != 2:
        raise errors.OpPrereqError("Only two-disk configurations supported")
      for disk in op.disks:
        if not (isinstance(disk, dict) and
                "size" in disk and
                isinstance(disk["size"], int) and
                "mode" in disk and
                disk["mode"] in ['r', 'w']):
          raise errors.OpPrereqError("Invalid contents of the"
                                     " 'disks' parameter")
    elif op.mode == constants.IALLOCATOR_MODE_RELOC:
      # a relocation test only needs the name of an existing instance
      if not hasattr(op, "name"):
        raise errors.OpPrereqError("Missing attribute 'name' on opcode input")
      fname = self.cfg.ExpandInstanceName(op.name)
      if fname is None:
        raise errors.OpPrereqError("Instance '%s' not found for relocation" %
                                   op.name)
      op.name = fname
      self.relocate_from = self.cfg.GetInstanceInfo(fname).secondary_nodes
    else:
      raise errors.OpPrereqError("Invalid test allocator mode '%s'" %
                                 op.mode)

    # direction checks are common to both modes; "out" needs a named
    # allocator script, "in" only dumps the generated input
    if op.direction == constants.IALLOCATOR_DIR_OUT:
      if getattr(op, "allocator", None) is None:
        raise errors.OpPrereqError("Missing allocator name")
    elif op.direction != constants.IALLOCATOR_DIR_IN:
      raise errors.OpPrereqError("Wrong allocator test '%s'" %
                                 op.direction)

  def Exec(self, feedback_fn):
    """Run the allocator test.

    """
    # build the mode-dependent keyword arguments for IAllocator
    if self.op.mode == constants.IALLOCATOR_MODE_ALLOC:
      extra_args = {
        "mem_size": self.op.mem_size,
        "disks": self.op.disks,
        "disk_template": self.op.disk_template,
        "os": self.op.os,
        "tags": self.op.tags,
        "nics": self.op.nics,
        "vcpus": self.op.vcpus,
        }
    else:
      extra_args = {
        "relocate_from": list(self.relocate_from),
        }
    ial = IAllocator(self.cfg, self.sstore,
                     mode=self.op.mode,
                     name=self.op.name,
                     **extra_args)

    if self.op.direction == constants.IALLOCATOR_DIR_IN:
      # "in" direction: just return the generated allocator input
      result = ial.in_text
    else:
      # "out" direction: actually run the allocator, without validation
      ial.Run(self.op.allocator, validate=False)
      result = ial.out_text
    return result