
lib/cmdlib.py @ 7b3a8fb5


1 2f31098c Iustin Pop
#
2 a8083063 Iustin Pop
#
3 a8083063 Iustin Pop
4 e7c6e02b Michael Hanselmann
# Copyright (C) 2006, 2007, 2008 Google Inc.
5 a8083063 Iustin Pop
#
6 a8083063 Iustin Pop
# This program is free software; you can redistribute it and/or modify
7 a8083063 Iustin Pop
# it under the terms of the GNU General Public License as published by
8 a8083063 Iustin Pop
# the Free Software Foundation; either version 2 of the License, or
9 a8083063 Iustin Pop
# (at your option) any later version.
10 a8083063 Iustin Pop
#
11 a8083063 Iustin Pop
# This program is distributed in the hope that it will be useful, but
12 a8083063 Iustin Pop
# WITHOUT ANY WARRANTY; without even the implied warranty of
13 a8083063 Iustin Pop
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
14 a8083063 Iustin Pop
# General Public License for more details.
15 a8083063 Iustin Pop
#
16 a8083063 Iustin Pop
# You should have received a copy of the GNU General Public License
17 a8083063 Iustin Pop
# along with this program; if not, write to the Free Software
18 a8083063 Iustin Pop
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
19 a8083063 Iustin Pop
# 02110-1301, USA.
20 a8083063 Iustin Pop
21 a8083063 Iustin Pop
22 880478f8 Iustin Pop
"""Module implementing the master-side code."""
23 a8083063 Iustin Pop
24 a8083063 Iustin Pop
# pylint: disable-msg=W0613,W0201
25 a8083063 Iustin Pop
26 a8083063 Iustin Pop
import os
27 a8083063 Iustin Pop
import os.path
28 a8083063 Iustin Pop
import sha
29 a8083063 Iustin Pop
import time
30 a8083063 Iustin Pop
import tempfile
31 a8083063 Iustin Pop
import re
32 a8083063 Iustin Pop
import platform
33 ffa1c0dc Iustin Pop
import logging
34 a8083063 Iustin Pop
35 a8083063 Iustin Pop
from ganeti import rpc
36 a8083063 Iustin Pop
from ganeti import ssh
37 a8083063 Iustin Pop
from ganeti import logger
38 a8083063 Iustin Pop
from ganeti import utils
39 a8083063 Iustin Pop
from ganeti import errors
40 a8083063 Iustin Pop
from ganeti import hypervisor
41 6048c986 Guido Trotter
from ganeti import locking
42 a8083063 Iustin Pop
from ganeti import constants
43 a8083063 Iustin Pop
from ganeti import objects
44 a8083063 Iustin Pop
from ganeti import opcodes
45 8d14b30d Iustin Pop
from ganeti import serializer
46 d61df03e Iustin Pop
47 d61df03e Iustin Pop
48 a8083063 Iustin Pop
class LogicalUnit(object):
49 396e1b78 Michael Hanselmann
  """Logical Unit base class.
50 a8083063 Iustin Pop

51 a8083063 Iustin Pop
  Subclasses must follow these rules:
52 d465bdc8 Guido Trotter
    - implement ExpandNames
53 d465bdc8 Guido Trotter
    - implement CheckPrereq
54 a8083063 Iustin Pop
    - implement Exec
55 a8083063 Iustin Pop
    - implement BuildHooksEnv
56 a8083063 Iustin Pop
    - redefine HPATH and HTYPE
57 05f86716 Guido Trotter
    - optionally redefine their run requirements:
58 05f86716 Guido Trotter
        REQ_MASTER: the LU needs to run on the master node
59 7e55040e Guido Trotter
        REQ_BGL: the LU needs to hold the Big Ganeti Lock exclusively
60 05f86716 Guido Trotter

61 05f86716 Guido Trotter
  Note that all commands require root permissions.
62 a8083063 Iustin Pop

63 a8083063 Iustin Pop
  """
64 a8083063 Iustin Pop
  HPATH = None
65 a8083063 Iustin Pop
  HTYPE = None
66 a8083063 Iustin Pop
  _OP_REQP = []
67 a8083063 Iustin Pop
  REQ_MASTER = True
68 7e55040e Guido Trotter
  REQ_BGL = True
69 a8083063 Iustin Pop
70 0b38cf6e Michael Hanselmann
  def __init__(self, processor, op, context):
71 a8083063 Iustin Pop
    """Constructor for LogicalUnit.
72 a8083063 Iustin Pop

73 a8083063 Iustin Pop
    This needs to be overriden in derived classes in order to check op
74 a8083063 Iustin Pop
    validity.
75 a8083063 Iustin Pop

76 a8083063 Iustin Pop
    """
77 5bfac263 Iustin Pop
    self.proc = processor
78 a8083063 Iustin Pop
    self.op = op
79 77b657a3 Guido Trotter
    self.cfg = context.cfg
80 77b657a3 Guido Trotter
    self.context = context
81 ca2a79e1 Guido Trotter
    # Dicts used to declare locking needs to mcpu
82 d465bdc8 Guido Trotter
    self.needed_locks = None
83 6683bba2 Guido Trotter
    self.acquired_locks = {}
84 3977a4c1 Guido Trotter
    self.share_locks = dict(((i, 0) for i in locking.LEVELS))
85 ca2a79e1 Guido Trotter
    self.add_locks = {}
86 ca2a79e1 Guido Trotter
    self.remove_locks = {}
87 c4a2fee1 Guido Trotter
    # Used to force good behavior when calling helper functions
88 c4a2fee1 Guido Trotter
    self.recalculate_locks = {}
89 c92b310a Michael Hanselmann
    self.__ssh = None
90 c92b310a Michael Hanselmann
91 a8083063 Iustin Pop
    for attr_name in self._OP_REQP:
92 a8083063 Iustin Pop
      attr_val = getattr(op, attr_name, None)
93 a8083063 Iustin Pop
      if attr_val is None:
94 3ecf6786 Iustin Pop
        raise errors.OpPrereqError("Required parameter '%s' missing" %
95 3ecf6786 Iustin Pop
                                   attr_name)
96 c6d58a2b Michael Hanselmann
97 f64c9de6 Guido Trotter
    if not self.cfg.IsCluster():
98 c6d58a2b Michael Hanselmann
      raise errors.OpPrereqError("Cluster not initialized yet,"
99 c6d58a2b Michael Hanselmann
                                 " use 'gnt-cluster init' first.")
100 c6d58a2b Michael Hanselmann
    if self.REQ_MASTER:
101 d6a02168 Michael Hanselmann
      master = self.cfg.GetMasterNode()
102 c6d58a2b Michael Hanselmann
      if master != utils.HostInfo().name:
103 c6d58a2b Michael Hanselmann
        raise errors.OpPrereqError("Commands must be run on the master"
104 c6d58a2b Michael Hanselmann
                                   " node %s" % master)
105 a8083063 Iustin Pop
106 c92b310a Michael Hanselmann
  def __GetSSH(self):
107 c92b310a Michael Hanselmann
    """Returns the SshRunner object
108 c92b310a Michael Hanselmann

109 c92b310a Michael Hanselmann
    """
110 c92b310a Michael Hanselmann
    if not self.__ssh:
111 6b0469d2 Iustin Pop
      self.__ssh = ssh.SshRunner(self.cfg.GetClusterName())
112 c92b310a Michael Hanselmann
    return self.__ssh
113 c92b310a Michael Hanselmann
114 c92b310a Michael Hanselmann
  ssh = property(fget=__GetSSH)
115 c92b310a Michael Hanselmann
116 d465bdc8 Guido Trotter
  def ExpandNames(self):
117 d465bdc8 Guido Trotter
    """Expand names for this LU.
118 d465bdc8 Guido Trotter

119 d465bdc8 Guido Trotter
    This method is called before starting to execute the opcode, and it should
120 d465bdc8 Guido Trotter
    update all the parameters of the opcode to their canonical form (e.g. a
121 d465bdc8 Guido Trotter
    short node name must be fully expanded after this method has successfully
122 d465bdc8 Guido Trotter
    completed). This way locking, hooks, logging, ecc. can work correctly.
123 d465bdc8 Guido Trotter

124 d465bdc8 Guido Trotter
    LUs which implement this method must also populate the self.needed_locks
125 d465bdc8 Guido Trotter
    member, as a dict with lock levels as keys, and a list of needed lock names
126 d465bdc8 Guido Trotter
    as values. Rules:
127 d465bdc8 Guido Trotter
      - Use an empty dict if you don't need any lock
128 d465bdc8 Guido Trotter
      - If you don't need any lock at a particular level omit that level
129 d465bdc8 Guido Trotter
      - Don't put anything for the BGL level
130 e310b019 Guido Trotter
      - If you want all locks at a level use locking.ALL_SET as a value
131 d465bdc8 Guido Trotter

132 3977a4c1 Guido Trotter
    If you need to share locks (rather than acquire them exclusively) at one
133 3977a4c1 Guido Trotter
    level you can modify self.share_locks, setting a true value (usually 1) for
134 3977a4c1 Guido Trotter
    that level. By default locks are not shared.
135 3977a4c1 Guido Trotter

136 d465bdc8 Guido Trotter
    Examples:
137 d465bdc8 Guido Trotter
    # Acquire all nodes and one instance
138 d465bdc8 Guido Trotter
    self.needed_locks = {
139 e310b019 Guido Trotter
      locking.LEVEL_NODE: locking.ALL_SET,
140 3a5d7305 Guido Trotter
      locking.LEVEL_INSTANCE: ['instance1.example.tld'],
141 d465bdc8 Guido Trotter
    }
142 d465bdc8 Guido Trotter
    # Acquire just two nodes
143 d465bdc8 Guido Trotter
    self.needed_locks = {
144 d465bdc8 Guido Trotter
      locking.LEVEL_NODE: ['node1.example.tld', 'node2.example.tld'],
145 d465bdc8 Guido Trotter
    }
146 d465bdc8 Guido Trotter
    # Acquire no locks
147 d465bdc8 Guido Trotter
    self.needed_locks = {} # No, you can't leave it to the default value None
148 d465bdc8 Guido Trotter

149 d465bdc8 Guido Trotter
    """
150 d465bdc8 Guido Trotter
    # The implementation of this method is mandatory only if the new LU is
151 d465bdc8 Guido Trotter
    # concurrent, so that old LUs don't need to be changed all at the same
152 d465bdc8 Guido Trotter
    # time.
153 d465bdc8 Guido Trotter
    if self.REQ_BGL:
154 d465bdc8 Guido Trotter
      self.needed_locks = {} # Exclusive LUs don't need locks.
155 d465bdc8 Guido Trotter
    else:
156 d465bdc8 Guido Trotter
      raise NotImplementedError
157 d465bdc8 Guido Trotter
158 fb8dcb62 Guido Trotter
  def DeclareLocks(self, level):
159 fb8dcb62 Guido Trotter
    """Declare LU locking needs for a level
160 fb8dcb62 Guido Trotter

161 fb8dcb62 Guido Trotter
    While most LUs can just declare their locking needs at ExpandNames time,
162 fb8dcb62 Guido Trotter
    sometimes there's the need to calculate some locks after having acquired
163 fb8dcb62 Guido Trotter
    the ones before. This function is called just before acquiring locks at a
164 fb8dcb62 Guido Trotter
    particular level, but after acquiring the ones at lower levels, and permits
165 fb8dcb62 Guido Trotter
    such calculations. It can be used to modify self.needed_locks, and by
166 fb8dcb62 Guido Trotter
    default it does nothing.
167 fb8dcb62 Guido Trotter

168 fb8dcb62 Guido Trotter
    This function is only called if you have something already set in
169 fb8dcb62 Guido Trotter
    self.needed_locks for the level.
170 fb8dcb62 Guido Trotter

171 fb8dcb62 Guido Trotter
    @param level: Locking level which is going to be locked
172 fb8dcb62 Guido Trotter
    @type level: member of ganeti.locking.LEVELS
173 fb8dcb62 Guido Trotter

174 fb8dcb62 Guido Trotter
    """
175 fb8dcb62 Guido Trotter
176 a8083063 Iustin Pop
  def CheckPrereq(self):
177 a8083063 Iustin Pop
    """Check prerequisites for this LU.
178 a8083063 Iustin Pop

179 a8083063 Iustin Pop
    This method should check that the prerequisites for the execution
180 a8083063 Iustin Pop
    of this LU are fulfilled. It can do internode communication, but
181 a8083063 Iustin Pop
    it should be idempotent - no cluster or system changes are
182 a8083063 Iustin Pop
    allowed.
183 a8083063 Iustin Pop

184 a8083063 Iustin Pop
    The method should raise errors.OpPrereqError in case something is
185 a8083063 Iustin Pop
    not fulfilled. Its return value is ignored.
186 a8083063 Iustin Pop

187 a8083063 Iustin Pop
    This method should also update all the parameters of the opcode to
188 d465bdc8 Guido Trotter
    their canonical form if it hasn't been done by ExpandNames before.
189 a8083063 Iustin Pop

190 a8083063 Iustin Pop
    """
191 a8083063 Iustin Pop
    raise NotImplementedError
192 a8083063 Iustin Pop
193 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
194 a8083063 Iustin Pop
    """Execute the LU.
195 a8083063 Iustin Pop

196 a8083063 Iustin Pop
    This method should implement the actual work. It should raise
197 a8083063 Iustin Pop
    errors.OpExecError for failures that are somewhat dealt with in
198 a8083063 Iustin Pop
    code, or expected.
199 a8083063 Iustin Pop

200 a8083063 Iustin Pop
    """
201 a8083063 Iustin Pop
    raise NotImplementedError
202 a8083063 Iustin Pop
203 a8083063 Iustin Pop
  def BuildHooksEnv(self):
204 a8083063 Iustin Pop
    """Build hooks environment for this LU.
205 a8083063 Iustin Pop

206 a8083063 Iustin Pop
    This method should return a three-node tuple consisting of: a dict
207 a8083063 Iustin Pop
    containing the environment that will be used for running the
208 a8083063 Iustin Pop
    specific hook for this LU, a list of node names on which the hook
209 a8083063 Iustin Pop
    should run before the execution, and a list of node names on which
210 a8083063 Iustin Pop
    the hook should run after the execution.
211 a8083063 Iustin Pop

212 a8083063 Iustin Pop
    The keys of the dict must not have 'GANETI_' prefixed as this will
213 a8083063 Iustin Pop
    be handled in the hooks runner. Also note additional keys will be
214 a8083063 Iustin Pop
    added by the hooks runner. If the LU doesn't define any
215 a8083063 Iustin Pop
    environment, an empty dict (and not None) should be returned.
216 a8083063 Iustin Pop

217 8a3fe350 Guido Trotter
    No nodes should be returned as an empty list (and not None).
218 a8083063 Iustin Pop

219 a8083063 Iustin Pop
    Note that if the HPATH for a LU class is None, this function will
220 a8083063 Iustin Pop
    not be called.
221 a8083063 Iustin Pop

222 a8083063 Iustin Pop
    """
223 a8083063 Iustin Pop
    raise NotImplementedError
224 a8083063 Iustin Pop
225 1fce5219 Guido Trotter
  def HooksCallBack(self, phase, hook_results, feedback_fn, lu_result):
226 1fce5219 Guido Trotter
    """Notify the LU about the results of its hooks.
227 1fce5219 Guido Trotter

228 1fce5219 Guido Trotter
    This method is called every time a hooks phase is executed, and notifies
229 1fce5219 Guido Trotter
    the Logical Unit about the hooks' result. The LU can then use it to alter
230 1fce5219 Guido Trotter
    its result based on the hooks.  By default the method does nothing and the
231 1fce5219 Guido Trotter
    previous result is passed back unchanged but any LU can define it if it
232 1fce5219 Guido Trotter
    wants to use the local cluster hook-scripts somehow.
233 1fce5219 Guido Trotter

234 1fce5219 Guido Trotter
    Args:
235 1fce5219 Guido Trotter
      phase: the hooks phase that has just been run
236 1fce5219 Guido Trotter
      hooks_results: the results of the multi-node hooks rpc call
237 1fce5219 Guido Trotter
      feedback_fn: function to send feedback back to the caller
238 1fce5219 Guido Trotter
      lu_result: the previous result this LU had, or None in the PRE phase.
239 1fce5219 Guido Trotter

240 1fce5219 Guido Trotter
    """
241 1fce5219 Guido Trotter
    return lu_result
242 1fce5219 Guido Trotter
243 43905206 Guido Trotter
  def _ExpandAndLockInstance(self):
244 43905206 Guido Trotter
    """Helper function to expand and lock an instance.
245 43905206 Guido Trotter

246 43905206 Guido Trotter
    Many LUs that work on an instance take its name in self.op.instance_name
247 43905206 Guido Trotter
    and need to expand it and then declare the expanded name for locking. This
248 43905206 Guido Trotter
    function does it, and then updates self.op.instance_name to the expanded
249 43905206 Guido Trotter
    name. It also initializes needed_locks as a dict, if this hasn't been done
250 43905206 Guido Trotter
    before.
251 43905206 Guido Trotter

252 43905206 Guido Trotter
    """
253 43905206 Guido Trotter
    if self.needed_locks is None:
254 43905206 Guido Trotter
      self.needed_locks = {}
255 43905206 Guido Trotter
    else:
256 43905206 Guido Trotter
      assert locking.LEVEL_INSTANCE not in self.needed_locks, \
257 43905206 Guido Trotter
        "_ExpandAndLockInstance called with instance-level locks set"
258 43905206 Guido Trotter
    expanded_name = self.cfg.ExpandInstanceName(self.op.instance_name)
259 43905206 Guido Trotter
    if expanded_name is None:
260 43905206 Guido Trotter
      raise errors.OpPrereqError("Instance '%s' not known" %
261 43905206 Guido Trotter
                                  self.op.instance_name)
262 43905206 Guido Trotter
    self.needed_locks[locking.LEVEL_INSTANCE] = expanded_name
263 43905206 Guido Trotter
    self.op.instance_name = expanded_name
264 43905206 Guido Trotter
265 a82ce292 Guido Trotter
  def _LockInstancesNodes(self, primary_only=False):
266 c4a2fee1 Guido Trotter
    """Helper function to declare instances' nodes for locking.
267 c4a2fee1 Guido Trotter

268 c4a2fee1 Guido Trotter
    This function should be called after locking one or more instances to lock
269 c4a2fee1 Guido Trotter
    their nodes. Its effect is populating self.needed_locks[locking.LEVEL_NODE]
270 c4a2fee1 Guido Trotter
    with all primary or secondary nodes for instances already locked and
271 c4a2fee1 Guido Trotter
    present in self.needed_locks[locking.LEVEL_INSTANCE].
272 c4a2fee1 Guido Trotter

273 c4a2fee1 Guido Trotter
    It should be called from DeclareLocks, and for safety only works if
274 c4a2fee1 Guido Trotter
    self.recalculate_locks[locking.LEVEL_NODE] is set.
275 c4a2fee1 Guido Trotter

276 c4a2fee1 Guido Trotter
    In the future it may grow parameters to just lock some instance's nodes, or
277 c4a2fee1 Guido Trotter
    to just lock primaries or secondary nodes, if needed.
278 c4a2fee1 Guido Trotter

279 c4a2fee1 Guido Trotter
    If should be called in DeclareLocks in a way similar to:
280 c4a2fee1 Guido Trotter

281 c4a2fee1 Guido Trotter
    if level == locking.LEVEL_NODE:
282 c4a2fee1 Guido Trotter
      self._LockInstancesNodes()
283 c4a2fee1 Guido Trotter

284 a82ce292 Guido Trotter
    @type primary_only: boolean
285 a82ce292 Guido Trotter
    @param primary_only: only lock primary nodes of locked instances
286 a82ce292 Guido Trotter

287 c4a2fee1 Guido Trotter
    """
288 c4a2fee1 Guido Trotter
    assert locking.LEVEL_NODE in self.recalculate_locks, \
289 c4a2fee1 Guido Trotter
      "_LockInstancesNodes helper function called with no nodes to recalculate"
290 c4a2fee1 Guido Trotter
291 c4a2fee1 Guido Trotter
    # TODO: check if we're really been called with the instance locks held
292 c4a2fee1 Guido Trotter
293 c4a2fee1 Guido Trotter
    # For now we'll replace self.needed_locks[locking.LEVEL_NODE], but in the
294 c4a2fee1 Guido Trotter
    # future we might want to have different behaviors depending on the value
295 c4a2fee1 Guido Trotter
    # of self.recalculate_locks[locking.LEVEL_NODE]
296 c4a2fee1 Guido Trotter
    wanted_nodes = []
297 6683bba2 Guido Trotter
    for instance_name in self.acquired_locks[locking.LEVEL_INSTANCE]:
298 c4a2fee1 Guido Trotter
      instance = self.context.cfg.GetInstanceInfo(instance_name)
299 c4a2fee1 Guido Trotter
      wanted_nodes.append(instance.primary_node)
300 a82ce292 Guido Trotter
      if not primary_only:
301 a82ce292 Guido Trotter
        wanted_nodes.extend(instance.secondary_nodes)
302 9513b6ab Guido Trotter
303 9513b6ab Guido Trotter
    if self.recalculate_locks[locking.LEVEL_NODE] == constants.LOCKS_REPLACE:
304 9513b6ab Guido Trotter
      self.needed_locks[locking.LEVEL_NODE] = wanted_nodes
305 9513b6ab Guido Trotter
    elif self.recalculate_locks[locking.LEVEL_NODE] == constants.LOCKS_APPEND:
306 9513b6ab Guido Trotter
      self.needed_locks[locking.LEVEL_NODE].extend(wanted_nodes)
307 c4a2fee1 Guido Trotter
308 c4a2fee1 Guido Trotter
    del self.recalculate_locks[locking.LEVEL_NODE]
309 c4a2fee1 Guido Trotter
310 a8083063 Iustin Pop
311 a8083063 Iustin Pop
class NoHooksLU(LogicalUnit):
312 a8083063 Iustin Pop
  """Simple LU which runs no hooks.
313 a8083063 Iustin Pop

314 a8083063 Iustin Pop
  This LU is intended as a parent for other LogicalUnits which will
315 a8083063 Iustin Pop
  run no hooks, in order to reduce duplicate code.
316 a8083063 Iustin Pop

317 a8083063 Iustin Pop
  """
318 a8083063 Iustin Pop
  HPATH = None
319 a8083063 Iustin Pop
  HTYPE = None
320 a8083063 Iustin Pop
321 a8083063 Iustin Pop
322 dcb93971 Michael Hanselmann
def _GetWantedNodes(lu, nodes):
323 a7ba5e53 Iustin Pop
  """Returns list of checked and expanded node names.
324 83120a01 Michael Hanselmann

325 83120a01 Michael Hanselmann
  Args:
326 83120a01 Michael Hanselmann
    nodes: List of nodes (strings) or None for all
327 83120a01 Michael Hanselmann

328 83120a01 Michael Hanselmann
  """
329 3312b702 Iustin Pop
  if not isinstance(nodes, list):
330 3ecf6786 Iustin Pop
    raise errors.OpPrereqError("Invalid argument type 'nodes'")
331 dcb93971 Michael Hanselmann
332 ea47808a Guido Trotter
  if not nodes:
333 ea47808a Guido Trotter
    raise errors.ProgrammerError("_GetWantedNodes should only be called with a"
334 ea47808a Guido Trotter
      " non-empty list of nodes whose name is to be expanded.")
335 dcb93971 Michael Hanselmann
336 ea47808a Guido Trotter
  wanted = []
337 ea47808a Guido Trotter
  for name in nodes:
338 ea47808a Guido Trotter
    node = lu.cfg.ExpandNodeName(name)
339 ea47808a Guido Trotter
    if node is None:
340 ea47808a Guido Trotter
      raise errors.OpPrereqError("No such node name '%s'" % name)
341 ea47808a Guido Trotter
    wanted.append(node)
342 dcb93971 Michael Hanselmann
343 a7ba5e53 Iustin Pop
  return utils.NiceSort(wanted)
344 3312b702 Iustin Pop
345 3312b702 Iustin Pop
346 3312b702 Iustin Pop
def _GetWantedInstances(lu, instances):
347 a7ba5e53 Iustin Pop
  """Returns list of checked and expanded instance names.
348 3312b702 Iustin Pop

349 3312b702 Iustin Pop
  Args:
350 3312b702 Iustin Pop
    instances: List of instances (strings) or None for all
351 3312b702 Iustin Pop

352 3312b702 Iustin Pop
  """
353 3312b702 Iustin Pop
  if not isinstance(instances, list):
354 3312b702 Iustin Pop
    raise errors.OpPrereqError("Invalid argument type 'instances'")
355 3312b702 Iustin Pop
356 3312b702 Iustin Pop
  if instances:
357 3312b702 Iustin Pop
    wanted = []
358 3312b702 Iustin Pop
359 3312b702 Iustin Pop
    for name in instances:
360 a7ba5e53 Iustin Pop
      instance = lu.cfg.ExpandInstanceName(name)
361 3312b702 Iustin Pop
      if instance is None:
362 3312b702 Iustin Pop
        raise errors.OpPrereqError("No such instance name '%s'" % name)
363 3312b702 Iustin Pop
      wanted.append(instance)
364 3312b702 Iustin Pop
365 3312b702 Iustin Pop
  else:
366 a7ba5e53 Iustin Pop
    wanted = lu.cfg.GetInstanceList()
367 a7ba5e53 Iustin Pop
  return utils.NiceSort(wanted)
368 dcb93971 Michael Hanselmann
369 dcb93971 Michael Hanselmann
370 dcb93971 Michael Hanselmann
def _CheckOutputFields(static, dynamic, selected):
371 83120a01 Michael Hanselmann
  """Checks whether all selected fields are valid.
372 83120a01 Michael Hanselmann

373 83120a01 Michael Hanselmann
  Args:
374 83120a01 Michael Hanselmann
    static: Static fields
375 83120a01 Michael Hanselmann
    dynamic: Dynamic fields
376 83120a01 Michael Hanselmann

377 83120a01 Michael Hanselmann
  """
378 83120a01 Michael Hanselmann
  static_fields = frozenset(static)
379 83120a01 Michael Hanselmann
  dynamic_fields = frozenset(dynamic)
380 dcb93971 Michael Hanselmann
381 83120a01 Michael Hanselmann
  all_fields = static_fields | dynamic_fields
382 dcb93971 Michael Hanselmann
383 83120a01 Michael Hanselmann
  if not all_fields.issuperset(selected):
384 3ecf6786 Iustin Pop
    raise errors.OpPrereqError("Unknown output fields selected: %s"
385 3ecf6786 Iustin Pop
                               % ",".join(frozenset(selected).
386 3ecf6786 Iustin Pop
                                          difference(all_fields)))
387 dcb93971 Michael Hanselmann
388 dcb93971 Michael Hanselmann
389 ecb215b5 Michael Hanselmann
def _BuildInstanceHookEnv(name, primary_node, secondary_nodes, os_type, status,
390 396e1b78 Michael Hanselmann
                          memory, vcpus, nics):
391 ecb215b5 Michael Hanselmann
  """Builds instance related env variables for hooks from single variables.
392 ecb215b5 Michael Hanselmann

393 ecb215b5 Michael Hanselmann
  Args:
394 ecb215b5 Michael Hanselmann
    secondary_nodes: List of secondary nodes as strings
395 396e1b78 Michael Hanselmann
  """
396 396e1b78 Michael Hanselmann
  env = {
397 0e137c28 Iustin Pop
    "OP_TARGET": name,
398 396e1b78 Michael Hanselmann
    "INSTANCE_NAME": name,
399 396e1b78 Michael Hanselmann
    "INSTANCE_PRIMARY": primary_node,
400 396e1b78 Michael Hanselmann
    "INSTANCE_SECONDARIES": " ".join(secondary_nodes),
401 ecb215b5 Michael Hanselmann
    "INSTANCE_OS_TYPE": os_type,
402 396e1b78 Michael Hanselmann
    "INSTANCE_STATUS": status,
403 396e1b78 Michael Hanselmann
    "INSTANCE_MEMORY": memory,
404 396e1b78 Michael Hanselmann
    "INSTANCE_VCPUS": vcpus,
405 396e1b78 Michael Hanselmann
  }
406 396e1b78 Michael Hanselmann
407 396e1b78 Michael Hanselmann
  if nics:
408 396e1b78 Michael Hanselmann
    nic_count = len(nics)
409 53e4e875 Guido Trotter
    for idx, (ip, bridge, mac) in enumerate(nics):
410 396e1b78 Michael Hanselmann
      if ip is None:
411 396e1b78 Michael Hanselmann
        ip = ""
412 396e1b78 Michael Hanselmann
      env["INSTANCE_NIC%d_IP" % idx] = ip
413 396e1b78 Michael Hanselmann
      env["INSTANCE_NIC%d_BRIDGE" % idx] = bridge
414 53e4e875 Guido Trotter
      env["INSTANCE_NIC%d_HWADDR" % idx] = mac
415 396e1b78 Michael Hanselmann
  else:
416 396e1b78 Michael Hanselmann
    nic_count = 0
417 396e1b78 Michael Hanselmann
418 396e1b78 Michael Hanselmann
  env["INSTANCE_NIC_COUNT"] = nic_count
419 396e1b78 Michael Hanselmann
420 396e1b78 Michael Hanselmann
  return env
421 396e1b78 Michael Hanselmann
422 396e1b78 Michael Hanselmann
423 396e1b78 Michael Hanselmann
def _BuildInstanceHookEnvByObject(instance, override=None):
424 ecb215b5 Michael Hanselmann
  """Builds instance related env variables for hooks from an object.
425 ecb215b5 Michael Hanselmann

426 ecb215b5 Michael Hanselmann
  Args:
427 ecb215b5 Michael Hanselmann
    instance: objects.Instance object of instance
428 ecb215b5 Michael Hanselmann
    override: dict of values to override
429 ecb215b5 Michael Hanselmann
  """
430 396e1b78 Michael Hanselmann
  args = {
431 396e1b78 Michael Hanselmann
    'name': instance.name,
432 396e1b78 Michael Hanselmann
    'primary_node': instance.primary_node,
433 396e1b78 Michael Hanselmann
    'secondary_nodes': instance.secondary_nodes,
434 ecb215b5 Michael Hanselmann
    'os_type': instance.os,
435 396e1b78 Michael Hanselmann
    'status': instance.os,
436 396e1b78 Michael Hanselmann
    'memory': instance.memory,
437 396e1b78 Michael Hanselmann
    'vcpus': instance.vcpus,
438 53e4e875 Guido Trotter
    'nics': [(nic.ip, nic.bridge, nic.mac) for nic in instance.nics],
439 396e1b78 Michael Hanselmann
  }
440 396e1b78 Michael Hanselmann
  if override:
441 396e1b78 Michael Hanselmann
    args.update(override)
442 396e1b78 Michael Hanselmann
  return _BuildInstanceHookEnv(**args)
443 396e1b78 Michael Hanselmann
444 396e1b78 Michael Hanselmann
445 bf6929a2 Alexander Schreiber
def _CheckInstanceBridgesExist(instance):
446 bf6929a2 Alexander Schreiber
  """Check that the brigdes needed by an instance exist.
447 bf6929a2 Alexander Schreiber

448 bf6929a2 Alexander Schreiber
  """
449 bf6929a2 Alexander Schreiber
  # check bridges existance
450 bf6929a2 Alexander Schreiber
  brlist = [nic.bridge for nic in instance.nics]
451 bf6929a2 Alexander Schreiber
  if not rpc.call_bridges_exist(instance.primary_node, brlist):
452 bf6929a2 Alexander Schreiber
    raise errors.OpPrereqError("one or more target bridges %s does not"
453 bf6929a2 Alexander Schreiber
                               " exist on destination node '%s'" %
454 bf6929a2 Alexander Schreiber
                               (brlist, instance.primary_node))
455 bf6929a2 Alexander Schreiber
456 bf6929a2 Alexander Schreiber
457 a8083063 Iustin Pop
class LUDestroyCluster(NoHooksLU):
458 a8083063 Iustin Pop
  """Logical unit for destroying the cluster.
459 a8083063 Iustin Pop

460 a8083063 Iustin Pop
  """
461 a8083063 Iustin Pop
  _OP_REQP = []
462 a8083063 Iustin Pop
463 a8083063 Iustin Pop
  def CheckPrereq(self):
464 a8083063 Iustin Pop
    """Check prerequisites.
465 a8083063 Iustin Pop

466 a8083063 Iustin Pop
    This checks whether the cluster is empty.
467 a8083063 Iustin Pop

468 a8083063 Iustin Pop
    Any errors are signalled by raising errors.OpPrereqError.
469 a8083063 Iustin Pop

470 a8083063 Iustin Pop
    """
471 d6a02168 Michael Hanselmann
    master = self.cfg.GetMasterNode()
472 a8083063 Iustin Pop
473 a8083063 Iustin Pop
    nodelist = self.cfg.GetNodeList()
474 db915bd1 Michael Hanselmann
    if len(nodelist) != 1 or nodelist[0] != master:
475 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("There are still %d node(s) in"
476 3ecf6786 Iustin Pop
                                 " this cluster." % (len(nodelist) - 1))
477 db915bd1 Michael Hanselmann
    instancelist = self.cfg.GetInstanceList()
478 db915bd1 Michael Hanselmann
    if instancelist:
479 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("There are still %d instance(s) in"
480 3ecf6786 Iustin Pop
                                 " this cluster." % len(instancelist))
481 a8083063 Iustin Pop
482 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
483 a8083063 Iustin Pop
    """Destroys the cluster.
484 a8083063 Iustin Pop

485 a8083063 Iustin Pop
    """
486 d6a02168 Michael Hanselmann
    master = self.cfg.GetMasterNode()
487 1c65840b Iustin Pop
    if not rpc.call_node_stop_master(master, False):
488 c9064964 Iustin Pop
      raise errors.OpExecError("Could not disable the master role")
489 70d9e3d8 Iustin Pop
    priv_key, pub_key, _ = ssh.GetUserFiles(constants.GANETI_RUNAS)
490 70d9e3d8 Iustin Pop
    utils.CreateBackup(priv_key)
491 70d9e3d8 Iustin Pop
    utils.CreateBackup(pub_key)
492 140aa4a8 Iustin Pop
    return master
493 a8083063 Iustin Pop
494 a8083063 Iustin Pop
495 d8fff41c Guido Trotter
class LUVerifyCluster(LogicalUnit):
496 a8083063 Iustin Pop
  """Verifies the cluster status.
497 a8083063 Iustin Pop

498 a8083063 Iustin Pop
  """
499 d8fff41c Guido Trotter
  HPATH = "cluster-verify"
500 d8fff41c Guido Trotter
  HTYPE = constants.HTYPE_CLUSTER
501 e54c4c5e Guido Trotter
  _OP_REQP = ["skip_checks"]
502 d4b9d97f Guido Trotter
  REQ_BGL = False
503 d4b9d97f Guido Trotter
504 d4b9d97f Guido Trotter
  def ExpandNames(self):
505 d4b9d97f Guido Trotter
    self.needed_locks = {
506 d4b9d97f Guido Trotter
      locking.LEVEL_NODE: locking.ALL_SET,
507 d4b9d97f Guido Trotter
      locking.LEVEL_INSTANCE: locking.ALL_SET,
508 d4b9d97f Guido Trotter
    }
509 d4b9d97f Guido Trotter
    self.share_locks = dict(((i, 1) for i in locking.LEVELS))
510 a8083063 Iustin Pop
511 a8083063 Iustin Pop
  def _VerifyNode(self, node, file_list, local_cksum, vglist, node_result,
512 a8083063 Iustin Pop
                  remote_version, feedback_fn):
513 a8083063 Iustin Pop
    """Run multiple tests against a node.
514 a8083063 Iustin Pop

515 a8083063 Iustin Pop
    Test list:
516 a8083063 Iustin Pop
      - compares ganeti version
517 a8083063 Iustin Pop
      - checks vg existance and size > 20G
518 a8083063 Iustin Pop
      - checks config file checksum
519 a8083063 Iustin Pop
      - checks ssh to other nodes
520 a8083063 Iustin Pop

521 a8083063 Iustin Pop
    Args:
522 a8083063 Iustin Pop
      node: name of the node to check
523 a8083063 Iustin Pop
      file_list: required list of files
524 a8083063 Iustin Pop
      local_cksum: dictionary of local files and their checksums
525 098c0958 Michael Hanselmann

526 a8083063 Iustin Pop
    """
527 a8083063 Iustin Pop
    # compares ganeti version
528 a8083063 Iustin Pop
    local_version = constants.PROTOCOL_VERSION
529 a8083063 Iustin Pop
    if not remote_version:
530 c840ae6f Guido Trotter
      feedback_fn("  - ERROR: connection to %s failed" % (node))
531 a8083063 Iustin Pop
      return True
532 a8083063 Iustin Pop
533 a8083063 Iustin Pop
    if local_version != remote_version:
534 a8083063 Iustin Pop
      feedback_fn("  - ERROR: sw version mismatch: master %s, node(%s) %s" %
535 a8083063 Iustin Pop
                      (local_version, node, remote_version))
536 a8083063 Iustin Pop
      return True
537 a8083063 Iustin Pop
538 a8083063 Iustin Pop
    # checks vg existance and size > 20G
539 a8083063 Iustin Pop
540 a8083063 Iustin Pop
    bad = False
541 a8083063 Iustin Pop
    if not vglist:
542 a8083063 Iustin Pop
      feedback_fn("  - ERROR: unable to check volume groups on node %s." %
543 a8083063 Iustin Pop
                      (node,))
544 a8083063 Iustin Pop
      bad = True
545 a8083063 Iustin Pop
    else:
546 8d1a2a64 Michael Hanselmann
      vgstatus = utils.CheckVolumeGroupSize(vglist, self.cfg.GetVGName(),
547 8d1a2a64 Michael Hanselmann
                                            constants.MIN_VG_SIZE)
548 a8083063 Iustin Pop
      if vgstatus:
549 a8083063 Iustin Pop
        feedback_fn("  - ERROR: %s on node %s" % (vgstatus, node))
550 a8083063 Iustin Pop
        bad = True
551 a8083063 Iustin Pop
552 a8083063 Iustin Pop
    # checks config file checksum
553 a8083063 Iustin Pop
    # checks ssh to any
554 a8083063 Iustin Pop
555 a8083063 Iustin Pop
    if 'filelist' not in node_result:
556 a8083063 Iustin Pop
      bad = True
557 a8083063 Iustin Pop
      feedback_fn("  - ERROR: node hasn't returned file checksum data")
558 a8083063 Iustin Pop
    else:
559 a8083063 Iustin Pop
      remote_cksum = node_result['filelist']
560 a8083063 Iustin Pop
      for file_name in file_list:
561 a8083063 Iustin Pop
        if file_name not in remote_cksum:
562 a8083063 Iustin Pop
          bad = True
563 a8083063 Iustin Pop
          feedback_fn("  - ERROR: file '%s' missing" % file_name)
564 a8083063 Iustin Pop
        elif remote_cksum[file_name] != local_cksum[file_name]:
565 a8083063 Iustin Pop
          bad = True
566 a8083063 Iustin Pop
          feedback_fn("  - ERROR: file '%s' has wrong checksum" % file_name)
567 a8083063 Iustin Pop
568 a8083063 Iustin Pop
    if 'nodelist' not in node_result:
569 a8083063 Iustin Pop
      bad = True
570 9d4bfc96 Iustin Pop
      feedback_fn("  - ERROR: node hasn't returned node ssh connectivity data")
571 a8083063 Iustin Pop
    else:
572 a8083063 Iustin Pop
      if node_result['nodelist']:
573 a8083063 Iustin Pop
        bad = True
574 a8083063 Iustin Pop
        for node in node_result['nodelist']:
575 9d4bfc96 Iustin Pop
          feedback_fn("  - ERROR: ssh communication with node '%s': %s" %
576 a8083063 Iustin Pop
                          (node, node_result['nodelist'][node]))
577 9d4bfc96 Iustin Pop
    if 'node-net-test' not in node_result:
578 9d4bfc96 Iustin Pop
      bad = True
579 9d4bfc96 Iustin Pop
      feedback_fn("  - ERROR: node hasn't returned node tcp connectivity data")
580 9d4bfc96 Iustin Pop
    else:
581 9d4bfc96 Iustin Pop
      if node_result['node-net-test']:
582 9d4bfc96 Iustin Pop
        bad = True
583 9d4bfc96 Iustin Pop
        nlist = utils.NiceSort(node_result['node-net-test'].keys())
584 9d4bfc96 Iustin Pop
        for node in nlist:
585 9d4bfc96 Iustin Pop
          feedback_fn("  - ERROR: tcp communication with node '%s': %s" %
586 9d4bfc96 Iustin Pop
                          (node, node_result['node-net-test'][node]))
587 9d4bfc96 Iustin Pop
588 a8083063 Iustin Pop
    hyp_result = node_result.get('hypervisor', None)
589 e69d05fd Iustin Pop
    if isinstance(hyp_result, dict):
590 e69d05fd Iustin Pop
      for hv_name, hv_result in hyp_result.iteritems():
591 e69d05fd Iustin Pop
        if hv_result is not None:
592 e69d05fd Iustin Pop
          feedback_fn("  - ERROR: hypervisor %s verify failure: '%s'" %
593 e69d05fd Iustin Pop
                      (hv_name, hv_result))
594 a8083063 Iustin Pop
    return bad
595 a8083063 Iustin Pop
596 c5705f58 Guido Trotter
  def _VerifyInstance(self, instance, instanceconfig, node_vol_is,
597 c5705f58 Guido Trotter
                      node_instance, feedback_fn):
598 a8083063 Iustin Pop
    """Verify an instance.
599 a8083063 Iustin Pop

600 a8083063 Iustin Pop
    This function checks to see if the required block devices are
601 a8083063 Iustin Pop
    available on the instance's node.
602 a8083063 Iustin Pop

603 a8083063 Iustin Pop
    """
604 a8083063 Iustin Pop
    bad = False
605 a8083063 Iustin Pop
606 a8083063 Iustin Pop
    node_current = instanceconfig.primary_node
607 a8083063 Iustin Pop
608 a8083063 Iustin Pop
    node_vol_should = {}
609 a8083063 Iustin Pop
    instanceconfig.MapLVsByNode(node_vol_should)
610 a8083063 Iustin Pop
611 a8083063 Iustin Pop
    for node in node_vol_should:
612 a8083063 Iustin Pop
      for volume in node_vol_should[node]:
613 a8083063 Iustin Pop
        if node not in node_vol_is or volume not in node_vol_is[node]:
614 a8083063 Iustin Pop
          feedback_fn("  - ERROR: volume %s missing on node %s" %
615 a8083063 Iustin Pop
                          (volume, node))
616 a8083063 Iustin Pop
          bad = True
617 a8083063 Iustin Pop
618 a8083063 Iustin Pop
    if not instanceconfig.status == 'down':
619 a872dae6 Guido Trotter
      if (node_current not in node_instance or
620 a872dae6 Guido Trotter
          not instance in node_instance[node_current]):
621 a8083063 Iustin Pop
        feedback_fn("  - ERROR: instance %s not running on node %s" %
622 a8083063 Iustin Pop
                        (instance, node_current))
623 a8083063 Iustin Pop
        bad = True
624 a8083063 Iustin Pop
625 a8083063 Iustin Pop
    for node in node_instance:
626 a8083063 Iustin Pop
      if (not node == node_current):
627 a8083063 Iustin Pop
        if instance in node_instance[node]:
628 a8083063 Iustin Pop
          feedback_fn("  - ERROR: instance %s should not run on node %s" %
629 a8083063 Iustin Pop
                          (instance, node))
630 a8083063 Iustin Pop
          bad = True
631 a8083063 Iustin Pop
632 6a438c98 Michael Hanselmann
    return bad
633 a8083063 Iustin Pop
634 a8083063 Iustin Pop
  def _VerifyOrphanVolumes(self, node_vol_should, node_vol_is, feedback_fn):
635 a8083063 Iustin Pop
    """Verify if there are any unknown volumes in the cluster.
636 a8083063 Iustin Pop

637 a8083063 Iustin Pop
    The .os, .swap and backup volumes are ignored. All other volumes are
638 a8083063 Iustin Pop
    reported as unknown.
639 a8083063 Iustin Pop

640 a8083063 Iustin Pop
    """
641 a8083063 Iustin Pop
    bad = False
642 a8083063 Iustin Pop
643 a8083063 Iustin Pop
    for node in node_vol_is:
644 a8083063 Iustin Pop
      for volume in node_vol_is[node]:
645 a8083063 Iustin Pop
        if node not in node_vol_should or volume not in node_vol_should[node]:
646 a8083063 Iustin Pop
          feedback_fn("  - ERROR: volume %s on node %s should not exist" %
647 a8083063 Iustin Pop
                      (volume, node))
648 a8083063 Iustin Pop
          bad = True
649 a8083063 Iustin Pop
    return bad
650 a8083063 Iustin Pop
651 a8083063 Iustin Pop
  def _VerifyOrphanInstances(self, instancelist, node_instance, feedback_fn):
652 a8083063 Iustin Pop
    """Verify the list of running instances.
653 a8083063 Iustin Pop

654 a8083063 Iustin Pop
    This checks what instances are running but unknown to the cluster.
655 a8083063 Iustin Pop

656 a8083063 Iustin Pop
    """
657 a8083063 Iustin Pop
    bad = False
658 a8083063 Iustin Pop
    for node in node_instance:
659 a8083063 Iustin Pop
      for runninginstance in node_instance[node]:
660 a8083063 Iustin Pop
        if runninginstance not in instancelist:
661 a8083063 Iustin Pop
          feedback_fn("  - ERROR: instance %s on node %s should not exist" %
662 a8083063 Iustin Pop
                          (runninginstance, node))
663 a8083063 Iustin Pop
          bad = True
664 a8083063 Iustin Pop
    return bad
665 a8083063 Iustin Pop
666 2b3b6ddd Guido Trotter
  def _VerifyNPlusOneMemory(self, node_info, instance_cfg, feedback_fn):
667 2b3b6ddd Guido Trotter
    """Verify N+1 Memory Resilience.
668 2b3b6ddd Guido Trotter

669 2b3b6ddd Guido Trotter
    Check that if one single node dies we can still start all the instances it
670 2b3b6ddd Guido Trotter
    was primary for.
671 2b3b6ddd Guido Trotter

672 2b3b6ddd Guido Trotter
    """
673 2b3b6ddd Guido Trotter
    bad = False
674 2b3b6ddd Guido Trotter
675 2b3b6ddd Guido Trotter
    for node, nodeinfo in node_info.iteritems():
676 2b3b6ddd Guido Trotter
      # This code checks that every node which is now listed as secondary has
677 2b3b6ddd Guido Trotter
      # enough memory to host all instances it is supposed to should a single
678 2b3b6ddd Guido Trotter
      # other node in the cluster fail.
679 2b3b6ddd Guido Trotter
      # FIXME: not ready for failover to an arbitrary node
680 2b3b6ddd Guido Trotter
      # FIXME: does not support file-backed instances
681 2b3b6ddd Guido Trotter
      # WARNING: we currently take into account down instances as well as up
682 2b3b6ddd Guido Trotter
      # ones, considering that even if they're down someone might want to start
683 2b3b6ddd Guido Trotter
      # them even in the event of a node failure.
684 2b3b6ddd Guido Trotter
      for prinode, instances in nodeinfo['sinst-by-pnode'].iteritems():
685 2b3b6ddd Guido Trotter
        needed_mem = 0
686 2b3b6ddd Guido Trotter
        for instance in instances:
687 2b3b6ddd Guido Trotter
          needed_mem += instance_cfg[instance].memory
688 2b3b6ddd Guido Trotter
        if nodeinfo['mfree'] < needed_mem:
689 2b3b6ddd Guido Trotter
          feedback_fn("  - ERROR: not enough memory on node %s to accomodate"
690 2b3b6ddd Guido Trotter
                      " failovers should node %s fail" % (node, prinode))
691 2b3b6ddd Guido Trotter
          bad = True
692 2b3b6ddd Guido Trotter
    return bad
693 2b3b6ddd Guido Trotter
694 a8083063 Iustin Pop
  def CheckPrereq(self):
695 a8083063 Iustin Pop
    """Check prerequisites.
696 a8083063 Iustin Pop

697 e54c4c5e Guido Trotter
    Transform the list of checks we're going to skip into a set and check that
698 e54c4c5e Guido Trotter
    all its members are valid.
699 a8083063 Iustin Pop

700 a8083063 Iustin Pop
    """
701 e54c4c5e Guido Trotter
    self.skip_set = frozenset(self.op.skip_checks)
702 e54c4c5e Guido Trotter
    if not constants.VERIFY_OPTIONAL_CHECKS.issuperset(self.skip_set):
703 e54c4c5e Guido Trotter
      raise errors.OpPrereqError("Invalid checks to be skipped specified")
704 a8083063 Iustin Pop
705 d8fff41c Guido Trotter
  def BuildHooksEnv(self):
706 d8fff41c Guido Trotter
    """Build hooks env.
707 d8fff41c Guido Trotter

708 d8fff41c Guido Trotter
    Cluster-Verify hooks just rone in the post phase and their failure makes
709 d8fff41c Guido Trotter
    the output be logged in the verify output and the verification to fail.
710 d8fff41c Guido Trotter

711 d8fff41c Guido Trotter
    """
712 d8fff41c Guido Trotter
    all_nodes = self.cfg.GetNodeList()
713 d8fff41c Guido Trotter
    # TODO: populate the environment with useful information for verify hooks
714 d8fff41c Guido Trotter
    env = {}
715 d8fff41c Guido Trotter
    return env, [], all_nodes
716 d8fff41c Guido Trotter
717 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
718 a8083063 Iustin Pop
    """Verify integrity of cluster, performing various test on nodes.
719 a8083063 Iustin Pop

720 a8083063 Iustin Pop
    """
721 a8083063 Iustin Pop
    bad = False
722 a8083063 Iustin Pop
    feedback_fn("* Verifying global settings")
723 8522ceeb Iustin Pop
    for msg in self.cfg.VerifyConfig():
724 8522ceeb Iustin Pop
      feedback_fn("  - ERROR: %s" % msg)
725 a8083063 Iustin Pop
726 a8083063 Iustin Pop
    vg_name = self.cfg.GetVGName()
727 e69d05fd Iustin Pop
    hypervisors = self.cfg.GetClusterInfo().enabled_hypervisors
728 a8083063 Iustin Pop
    nodelist = utils.NiceSort(self.cfg.GetNodeList())
729 9d4bfc96 Iustin Pop
    nodeinfo = [self.cfg.GetNodeInfo(nname) for nname in nodelist]
730 a8083063 Iustin Pop
    instancelist = utils.NiceSort(self.cfg.GetInstanceList())
731 93e4c50b Guido Trotter
    i_non_redundant = [] # Non redundant instances
732 a8083063 Iustin Pop
    node_volume = {}
733 a8083063 Iustin Pop
    node_instance = {}
734 9c9c7d30 Guido Trotter
    node_info = {}
735 26b6af5e Guido Trotter
    instance_cfg = {}
736 a8083063 Iustin Pop
737 a8083063 Iustin Pop
    # FIXME: verify OS list
738 a8083063 Iustin Pop
    # do local checksums
739 d6a02168 Michael Hanselmann
    file_names = []
740 cb91d46e Iustin Pop
    file_names.append(constants.SSL_CERT_FILE)
741 cb91d46e Iustin Pop
    file_names.append(constants.CLUSTER_CONF_FILE)
742 a8083063 Iustin Pop
    local_checksums = utils.FingerprintFiles(file_names)
743 a8083063 Iustin Pop
744 a8083063 Iustin Pop
    feedback_fn("* Gathering data (%d nodes)" % len(nodelist))
745 a8083063 Iustin Pop
    all_volumeinfo = rpc.call_volume_list(nodelist, vg_name)
746 e69d05fd Iustin Pop
    all_instanceinfo = rpc.call_instance_list(nodelist, hypervisors)
747 a8083063 Iustin Pop
    all_vglist = rpc.call_vg_list(nodelist)
748 a8083063 Iustin Pop
    node_verify_param = {
749 a8083063 Iustin Pop
      'filelist': file_names,
750 a8083063 Iustin Pop
      'nodelist': nodelist,
751 e69d05fd Iustin Pop
      'hypervisor': hypervisors,
752 9d4bfc96 Iustin Pop
      'node-net-test': [(node.name, node.primary_ip, node.secondary_ip)
753 9d4bfc96 Iustin Pop
                        for node in nodeinfo]
754 a8083063 Iustin Pop
      }
755 62c9ec92 Iustin Pop
    all_nvinfo = rpc.call_node_verify(nodelist, node_verify_param,
756 62c9ec92 Iustin Pop
                                      self.cfg.GetClusterName())
757 a8083063 Iustin Pop
    all_rversion = rpc.call_version(nodelist)
758 e69d05fd Iustin Pop
    all_ninfo = rpc.call_node_info(nodelist, self.cfg.GetVGName(),
759 e69d05fd Iustin Pop
                                   self.cfg.GetHypervisorType())
760 a8083063 Iustin Pop
761 a8083063 Iustin Pop
    for node in nodelist:
762 a8083063 Iustin Pop
      feedback_fn("* Verifying node %s" % node)
763 a8083063 Iustin Pop
      result = self._VerifyNode(node, file_names, local_checksums,
764 a8083063 Iustin Pop
                                all_vglist[node], all_nvinfo[node],
765 a8083063 Iustin Pop
                                all_rversion[node], feedback_fn)
766 a8083063 Iustin Pop
      bad = bad or result
767 a8083063 Iustin Pop
768 a8083063 Iustin Pop
      # node_volume
769 a8083063 Iustin Pop
      volumeinfo = all_volumeinfo[node]
770 a8083063 Iustin Pop
771 b63ed789 Iustin Pop
      if isinstance(volumeinfo, basestring):
772 b63ed789 Iustin Pop
        feedback_fn("  - ERROR: LVM problem on node %s: %s" %
773 b63ed789 Iustin Pop
                    (node, volumeinfo[-400:].encode('string_escape')))
774 b63ed789 Iustin Pop
        bad = True
775 b63ed789 Iustin Pop
        node_volume[node] = {}
776 b63ed789 Iustin Pop
      elif not isinstance(volumeinfo, dict):
777 a8083063 Iustin Pop
        feedback_fn("  - ERROR: connection to %s failed" % (node,))
778 a8083063 Iustin Pop
        bad = True
779 a8083063 Iustin Pop
        continue
780 b63ed789 Iustin Pop
      else:
781 b63ed789 Iustin Pop
        node_volume[node] = volumeinfo
782 a8083063 Iustin Pop
783 a8083063 Iustin Pop
      # node_instance
784 a8083063 Iustin Pop
      nodeinstance = all_instanceinfo[node]
785 a8083063 Iustin Pop
      if type(nodeinstance) != list:
786 a8083063 Iustin Pop
        feedback_fn("  - ERROR: connection to %s failed" % (node,))
787 a8083063 Iustin Pop
        bad = True
788 a8083063 Iustin Pop
        continue
789 a8083063 Iustin Pop
790 a8083063 Iustin Pop
      node_instance[node] = nodeinstance
791 a8083063 Iustin Pop
792 9c9c7d30 Guido Trotter
      # node_info
793 9c9c7d30 Guido Trotter
      nodeinfo = all_ninfo[node]
794 9c9c7d30 Guido Trotter
      if not isinstance(nodeinfo, dict):
795 9c9c7d30 Guido Trotter
        feedback_fn("  - ERROR: connection to %s failed" % (node,))
796 9c9c7d30 Guido Trotter
        bad = True
797 9c9c7d30 Guido Trotter
        continue
798 9c9c7d30 Guido Trotter
799 9c9c7d30 Guido Trotter
      try:
800 9c9c7d30 Guido Trotter
        node_info[node] = {
801 9c9c7d30 Guido Trotter
          "mfree": int(nodeinfo['memory_free']),
802 9c9c7d30 Guido Trotter
          "dfree": int(nodeinfo['vg_free']),
803 93e4c50b Guido Trotter
          "pinst": [],
804 93e4c50b Guido Trotter
          "sinst": [],
805 36e7da50 Guido Trotter
          # dictionary holding all instances this node is secondary for,
806 36e7da50 Guido Trotter
          # grouped by their primary node. Each key is a cluster node, and each
807 36e7da50 Guido Trotter
          # value is a list of instances which have the key as primary and the
808 36e7da50 Guido Trotter
          # current node as secondary.  this is handy to calculate N+1 memory
809 36e7da50 Guido Trotter
          # availability if you can only failover from a primary to its
810 36e7da50 Guido Trotter
          # secondary.
811 36e7da50 Guido Trotter
          "sinst-by-pnode": {},
812 9c9c7d30 Guido Trotter
        }
813 9c9c7d30 Guido Trotter
      except ValueError:
814 9c9c7d30 Guido Trotter
        feedback_fn("  - ERROR: invalid value returned from node %s" % (node,))
815 9c9c7d30 Guido Trotter
        bad = True
816 9c9c7d30 Guido Trotter
        continue
817 9c9c7d30 Guido Trotter
818 a8083063 Iustin Pop
    node_vol_should = {}
819 a8083063 Iustin Pop
820 a8083063 Iustin Pop
    for instance in instancelist:
821 a8083063 Iustin Pop
      feedback_fn("* Verifying instance %s" % instance)
822 a8083063 Iustin Pop
      inst_config = self.cfg.GetInstanceInfo(instance)
823 c5705f58 Guido Trotter
      result =  self._VerifyInstance(instance, inst_config, node_volume,
824 c5705f58 Guido Trotter
                                     node_instance, feedback_fn)
825 c5705f58 Guido Trotter
      bad = bad or result
826 a8083063 Iustin Pop
827 a8083063 Iustin Pop
      inst_config.MapLVsByNode(node_vol_should)
828 a8083063 Iustin Pop
829 26b6af5e Guido Trotter
      instance_cfg[instance] = inst_config
830 26b6af5e Guido Trotter
831 93e4c50b Guido Trotter
      pnode = inst_config.primary_node
832 93e4c50b Guido Trotter
      if pnode in node_info:
833 93e4c50b Guido Trotter
        node_info[pnode]['pinst'].append(instance)
834 93e4c50b Guido Trotter
      else:
835 93e4c50b Guido Trotter
        feedback_fn("  - ERROR: instance %s, connection to primary node"
836 93e4c50b Guido Trotter
                    " %s failed" % (instance, pnode))
837 93e4c50b Guido Trotter
        bad = True
838 93e4c50b Guido Trotter
839 93e4c50b Guido Trotter
      # If the instance is non-redundant we cannot survive losing its primary
840 93e4c50b Guido Trotter
      # node, so we are not N+1 compliant. On the other hand we have no disk
841 93e4c50b Guido Trotter
      # templates with more than one secondary so that situation is not well
842 93e4c50b Guido Trotter
      # supported either.
843 93e4c50b Guido Trotter
      # FIXME: does not support file-backed instances
844 93e4c50b Guido Trotter
      if len(inst_config.secondary_nodes) == 0:
845 93e4c50b Guido Trotter
        i_non_redundant.append(instance)
846 93e4c50b Guido Trotter
      elif len(inst_config.secondary_nodes) > 1:
847 93e4c50b Guido Trotter
        feedback_fn("  - WARNING: multiple secondaries for instance %s"
848 93e4c50b Guido Trotter
                    % instance)
849 93e4c50b Guido Trotter
850 93e4c50b Guido Trotter
      for snode in inst_config.secondary_nodes:
851 93e4c50b Guido Trotter
        if snode in node_info:
852 93e4c50b Guido Trotter
          node_info[snode]['sinst'].append(instance)
853 36e7da50 Guido Trotter
          if pnode not in node_info[snode]['sinst-by-pnode']:
854 36e7da50 Guido Trotter
            node_info[snode]['sinst-by-pnode'][pnode] = []
855 36e7da50 Guido Trotter
          node_info[snode]['sinst-by-pnode'][pnode].append(instance)
856 93e4c50b Guido Trotter
        else:
857 93e4c50b Guido Trotter
          feedback_fn("  - ERROR: instance %s, connection to secondary node"
858 93e4c50b Guido Trotter
                      " %s failed" % (instance, snode))
859 93e4c50b Guido Trotter
860 a8083063 Iustin Pop
    feedback_fn("* Verifying orphan volumes")
861 a8083063 Iustin Pop
    result = self._VerifyOrphanVolumes(node_vol_should, node_volume,
862 a8083063 Iustin Pop
                                       feedback_fn)
863 a8083063 Iustin Pop
    bad = bad or result
864 a8083063 Iustin Pop
865 a8083063 Iustin Pop
    feedback_fn("* Verifying remaining instances")
866 a8083063 Iustin Pop
    result = self._VerifyOrphanInstances(instancelist, node_instance,
867 a8083063 Iustin Pop
                                         feedback_fn)
868 a8083063 Iustin Pop
    bad = bad or result
869 a8083063 Iustin Pop
870 e54c4c5e Guido Trotter
    if constants.VERIFY_NPLUSONE_MEM not in self.skip_set:
871 e54c4c5e Guido Trotter
      feedback_fn("* Verifying N+1 Memory redundancy")
872 e54c4c5e Guido Trotter
      result = self._VerifyNPlusOneMemory(node_info, instance_cfg, feedback_fn)
873 e54c4c5e Guido Trotter
      bad = bad or result
874 2b3b6ddd Guido Trotter
875 2b3b6ddd Guido Trotter
    feedback_fn("* Other Notes")
876 2b3b6ddd Guido Trotter
    if i_non_redundant:
877 2b3b6ddd Guido Trotter
      feedback_fn("  - NOTICE: %d non-redundant instance(s) found."
878 2b3b6ddd Guido Trotter
                  % len(i_non_redundant))
879 2b3b6ddd Guido Trotter
880 34290825 Michael Hanselmann
    return not bad
881 a8083063 Iustin Pop
882 d8fff41c Guido Trotter
  def HooksCallBack(self, phase, hooks_results, feedback_fn, lu_result):
883 d8fff41c Guido Trotter
    """Analize the post-hooks' result, handle it, and send some
884 d8fff41c Guido Trotter
    nicely-formatted feedback back to the user.
885 d8fff41c Guido Trotter

886 d8fff41c Guido Trotter
    Args:
887 d8fff41c Guido Trotter
      phase: the hooks phase that has just been run
888 d8fff41c Guido Trotter
      hooks_results: the results of the multi-node hooks rpc call
889 d8fff41c Guido Trotter
      feedback_fn: function to send feedback back to the caller
890 d8fff41c Guido Trotter
      lu_result: previous Exec result
891 d8fff41c Guido Trotter

892 d8fff41c Guido Trotter
    """
893 38206f3c Iustin Pop
    # We only really run POST phase hooks, and are only interested in
894 38206f3c Iustin Pop
    # their results
895 d8fff41c Guido Trotter
    if phase == constants.HOOKS_PHASE_POST:
896 d8fff41c Guido Trotter
      # Used to change hooks' output to proper indentation
897 d8fff41c Guido Trotter
      indent_re = re.compile('^', re.M)
898 d8fff41c Guido Trotter
      feedback_fn("* Hooks Results")
899 d8fff41c Guido Trotter
      if not hooks_results:
900 d8fff41c Guido Trotter
        feedback_fn("  - ERROR: general communication failure")
901 d8fff41c Guido Trotter
        lu_result = 1
902 d8fff41c Guido Trotter
      else:
903 d8fff41c Guido Trotter
        for node_name in hooks_results:
904 d8fff41c Guido Trotter
          show_node_header = True
905 d8fff41c Guido Trotter
          res = hooks_results[node_name]
906 d8fff41c Guido Trotter
          if res is False or not isinstance(res, list):
907 d8fff41c Guido Trotter
            feedback_fn("    Communication failure")
908 d8fff41c Guido Trotter
            lu_result = 1
909 d8fff41c Guido Trotter
            continue
910 d8fff41c Guido Trotter
          for script, hkr, output in res:
911 d8fff41c Guido Trotter
            if hkr == constants.HKR_FAIL:
912 d8fff41c Guido Trotter
              # The node header is only shown once, if there are
913 d8fff41c Guido Trotter
              # failing hooks on that node
914 d8fff41c Guido Trotter
              if show_node_header:
915 d8fff41c Guido Trotter
                feedback_fn("  Node %s:" % node_name)
916 d8fff41c Guido Trotter
                show_node_header = False
917 d8fff41c Guido Trotter
              feedback_fn("    ERROR: Script %s failed, output:" % script)
918 d8fff41c Guido Trotter
              output = indent_re.sub('      ', output)
919 d8fff41c Guido Trotter
              feedback_fn("%s" % output)
920 d8fff41c Guido Trotter
              lu_result = 1
921 d8fff41c Guido Trotter
922 d8fff41c Guido Trotter
      return lu_result
923 d8fff41c Guido Trotter
924 a8083063 Iustin Pop
925 2c95a8d4 Iustin Pop
class LUVerifyDisks(NoHooksLU):
926 2c95a8d4 Iustin Pop
  """Verifies the cluster disks status.
927 2c95a8d4 Iustin Pop

928 2c95a8d4 Iustin Pop
  """
929 2c95a8d4 Iustin Pop
  _OP_REQP = []
930 d4b9d97f Guido Trotter
  REQ_BGL = False
931 d4b9d97f Guido Trotter
932 d4b9d97f Guido Trotter
  def ExpandNames(self):
933 d4b9d97f Guido Trotter
    self.needed_locks = {
934 d4b9d97f Guido Trotter
      locking.LEVEL_NODE: locking.ALL_SET,
935 d4b9d97f Guido Trotter
      locking.LEVEL_INSTANCE: locking.ALL_SET,
936 d4b9d97f Guido Trotter
    }
937 d4b9d97f Guido Trotter
    self.share_locks = dict(((i, 1) for i in locking.LEVELS))
938 2c95a8d4 Iustin Pop
939 2c95a8d4 Iustin Pop
  def CheckPrereq(self):
940 2c95a8d4 Iustin Pop
    """Check prerequisites.
941 2c95a8d4 Iustin Pop

942 2c95a8d4 Iustin Pop
    This has no prerequisites.
943 2c95a8d4 Iustin Pop

944 2c95a8d4 Iustin Pop
    """
945 2c95a8d4 Iustin Pop
    pass
946 2c95a8d4 Iustin Pop
947 2c95a8d4 Iustin Pop
  def Exec(self, feedback_fn):
948 2c95a8d4 Iustin Pop
    """Verify integrity of cluster disks.
949 2c95a8d4 Iustin Pop

950 2c95a8d4 Iustin Pop
    """
951 b63ed789 Iustin Pop
    result = res_nodes, res_nlvm, res_instances, res_missing = [], {}, [], {}
952 2c95a8d4 Iustin Pop
953 2c95a8d4 Iustin Pop
    vg_name = self.cfg.GetVGName()
954 2c95a8d4 Iustin Pop
    nodes = utils.NiceSort(self.cfg.GetNodeList())
955 2c95a8d4 Iustin Pop
    instances = [self.cfg.GetInstanceInfo(name)
956 2c95a8d4 Iustin Pop
                 for name in self.cfg.GetInstanceList()]
957 2c95a8d4 Iustin Pop
958 2c95a8d4 Iustin Pop
    nv_dict = {}
959 2c95a8d4 Iustin Pop
    for inst in instances:
960 2c95a8d4 Iustin Pop
      inst_lvs = {}
961 2c95a8d4 Iustin Pop
      if (inst.status != "up" or
962 2c95a8d4 Iustin Pop
          inst.disk_template not in constants.DTS_NET_MIRROR):
963 2c95a8d4 Iustin Pop
        continue
964 2c95a8d4 Iustin Pop
      inst.MapLVsByNode(inst_lvs)
965 2c95a8d4 Iustin Pop
      # transform { iname: {node: [vol,],},} to {(node, vol): iname}
966 2c95a8d4 Iustin Pop
      for node, vol_list in inst_lvs.iteritems():
967 2c95a8d4 Iustin Pop
        for vol in vol_list:
968 2c95a8d4 Iustin Pop
          nv_dict[(node, vol)] = inst
969 2c95a8d4 Iustin Pop
970 2c95a8d4 Iustin Pop
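    # Example of the transform above (hypothetical names): an input of
    #   {"inst1": {"node1": ["lv1", "lv2"], "node2": ["lv1"]}}
    # yields
    #   {("node1", "lv1"): <inst1>, ("node1", "lv2"): <inst1>,
    #    ("node2", "lv1"): <inst1>}
    # where <inst1> is the corresponding Instance object.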
    if not nv_dict:
971 2c95a8d4 Iustin Pop
      return result
972 2c95a8d4 Iustin Pop
973 2c95a8d4 Iustin Pop
    node_lvs = rpc.call_volume_list(nodes, vg_name)
974 2c95a8d4 Iustin Pop
975 2c95a8d4 Iustin Pop
    to_act = set()
976 2c95a8d4 Iustin Pop
    for node in nodes:
977 2c95a8d4 Iustin Pop
      # node_volume
978 2c95a8d4 Iustin Pop
      lvs = node_lvs[node]
979 2c95a8d4 Iustin Pop
980 b63ed789 Iustin Pop
      if isinstance(lvs, basestring):
981 b63ed789 Iustin Pop
        logger.Info("error enumerating LVs on node %s: %s" % (node, lvs))
982 b63ed789 Iustin Pop
        res_nlvm[node] = lvs
983 b63ed789 Iustin Pop
      elif not isinstance(lvs, dict):
984 2c95a8d4 Iustin Pop
        logger.Info("connection to node %s failed or invalid data returned" %
985 2c95a8d4 Iustin Pop
                    (node,))
986 2c95a8d4 Iustin Pop
        res_nodes.append(node)
987 2c95a8d4 Iustin Pop
        continue
988 2c95a8d4 Iustin Pop
989 2c95a8d4 Iustin Pop
      for lv_name, (_, lv_inactive, lv_online) in lvs.iteritems():
990 b63ed789 Iustin Pop
        inst = nv_dict.pop((node, lv_name), None)
991 b63ed789 Iustin Pop
        if (not lv_online and inst is not None
992 b63ed789 Iustin Pop
            and inst.name not in res_instances):
993 b08d5a87 Iustin Pop
          res_instances.append(inst.name)
994 2c95a8d4 Iustin Pop
995 b63ed789 Iustin Pop
    # any leftover items in nv_dict are missing LVs, let's arrange the
996 b63ed789 Iustin Pop
    # data better
997 b63ed789 Iustin Pop
    for key, inst in nv_dict.iteritems():
998 b63ed789 Iustin Pop
      if inst.name not in res_missing:
999 b63ed789 Iustin Pop
        res_missing[inst.name] = []
1000 b63ed789 Iustin Pop
      res_missing[inst.name].append(key)
1001 b63ed789 Iustin Pop
1002 2c95a8d4 Iustin Pop
    return result
1003 2c95a8d4 Iustin Pop
1004 2c95a8d4 Iustin Pop
1005 07bd8a51 Iustin Pop
class LURenameCluster(LogicalUnit):
1006 07bd8a51 Iustin Pop
  """Rename the cluster.
1007 07bd8a51 Iustin Pop

1008 07bd8a51 Iustin Pop
  """
1009 07bd8a51 Iustin Pop
  HPATH = "cluster-rename"
1010 07bd8a51 Iustin Pop
  HTYPE = constants.HTYPE_CLUSTER
1011 07bd8a51 Iustin Pop
  _OP_REQP = ["name"]
1012 07bd8a51 Iustin Pop
1013 07bd8a51 Iustin Pop
  def BuildHooksEnv(self):
1014 07bd8a51 Iustin Pop
    """Build hooks env.
1015 07bd8a51 Iustin Pop

1016 07bd8a51 Iustin Pop
    """
1017 07bd8a51 Iustin Pop
    env = {
1018 d6a02168 Michael Hanselmann
      "OP_TARGET": self.cfg.GetClusterName(),
1019 07bd8a51 Iustin Pop
      "NEW_NAME": self.op.name,
1020 07bd8a51 Iustin Pop
      }
1021 d6a02168 Michael Hanselmann
    mn = self.cfg.GetMasterNode()
1022 07bd8a51 Iustin Pop
    return env, [mn], [mn]
1023 07bd8a51 Iustin Pop
1024 07bd8a51 Iustin Pop
  def CheckPrereq(self):
1025 07bd8a51 Iustin Pop
    """Verify that the passed name is a valid one.
1026 07bd8a51 Iustin Pop

1027 07bd8a51 Iustin Pop
    """
1028 89e1fc26 Iustin Pop
    hostname = utils.HostInfo(self.op.name)
1029 07bd8a51 Iustin Pop
1030 bcf043c9 Iustin Pop
    new_name = hostname.name
1031 bcf043c9 Iustin Pop
    self.ip = new_ip = hostname.ip
1032 d6a02168 Michael Hanselmann
    old_name = self.cfg.GetClusterName()
1033 d6a02168 Michael Hanselmann
    old_ip = self.cfg.GetMasterIP()
1034 07bd8a51 Iustin Pop
    if new_name == old_name and new_ip == old_ip:
1035 07bd8a51 Iustin Pop
      raise errors.OpPrereqError("Neither the name nor the IP address of the"
1036 07bd8a51 Iustin Pop
                                 " cluster has changed")
1037 07bd8a51 Iustin Pop
    if new_ip != old_ip:
1038 937f983d Guido Trotter
      if utils.TcpPing(new_ip, constants.DEFAULT_NODED_PORT):
1039 07bd8a51 Iustin Pop
        raise errors.OpPrereqError("The given cluster IP address (%s) is"
1040 07bd8a51 Iustin Pop
                                   " reachable on the network. Aborting." %
1041 07bd8a51 Iustin Pop
                                   new_ip)
1042 07bd8a51 Iustin Pop
1043 07bd8a51 Iustin Pop
    self.op.name = new_name
1044 07bd8a51 Iustin Pop
1045 07bd8a51 Iustin Pop
  def Exec(self, feedback_fn):
1046 07bd8a51 Iustin Pop
    """Rename the cluster.
1047 07bd8a51 Iustin Pop

1048 07bd8a51 Iustin Pop
    """
1049 07bd8a51 Iustin Pop
    clustername = self.op.name
1050 07bd8a51 Iustin Pop
    ip = self.ip
1051 07bd8a51 Iustin Pop
1052 07bd8a51 Iustin Pop
    # shutdown the master IP
1053 d6a02168 Michael Hanselmann
    master = self.cfg.GetMasterNode()
1054 1c65840b Iustin Pop
    if not rpc.call_node_stop_master(master, False):
1055 07bd8a51 Iustin Pop
      raise errors.OpExecError("Could not disable the master role")
1056 07bd8a51 Iustin Pop
1057 07bd8a51 Iustin Pop
    try:
1058 07bd8a51 Iustin Pop
      # modify the sstore
1059 d6a02168 Michael Hanselmann
      # TODO: sstore
1060 07bd8a51 Iustin Pop
      ss.SetKey(ss.SS_MASTER_IP, ip)
1061 07bd8a51 Iustin Pop
      ss.SetKey(ss.SS_CLUSTER_NAME, clustername)
1062 07bd8a51 Iustin Pop
1063 07bd8a51 Iustin Pop
      # Distribute updated ss config to all nodes
1064 07bd8a51 Iustin Pop
      myself = self.cfg.GetNodeInfo(master)
1065 07bd8a51 Iustin Pop
      dist_nodes = self.cfg.GetNodeList()
1066 07bd8a51 Iustin Pop
      if myself.name in dist_nodes:
1067 07bd8a51 Iustin Pop
        dist_nodes.remove(myself.name)
1068 07bd8a51 Iustin Pop
1069 07bd8a51 Iustin Pop
      logger.Debug("Copying updated ssconf data to all nodes")
1070 07bd8a51 Iustin Pop
      for keyname in [ss.SS_CLUSTER_NAME, ss.SS_MASTER_IP]:
1071 07bd8a51 Iustin Pop
        fname = ss.KeyToFilename(keyname)
1072 07bd8a51 Iustin Pop
        result = rpc.call_upload_file(dist_nodes, fname)
1073 07bd8a51 Iustin Pop
        for to_node in dist_nodes:
1074 07bd8a51 Iustin Pop
          if not result[to_node]:
1075 07bd8a51 Iustin Pop
            logger.Error("copy of file %s to node %s failed" %
1076 07bd8a51 Iustin Pop
                         (fname, to_node))
1077 07bd8a51 Iustin Pop
    finally:
1078 1c65840b Iustin Pop
      if not rpc.call_node_start_master(master, False):
1079 f4bc1f2c Michael Hanselmann
        logger.Error("Could not re-enable the master role on the master,"
1080 f4bc1f2c Michael Hanselmann
                     " please restart manually.")
1081 07bd8a51 Iustin Pop
1082 07bd8a51 Iustin Pop
1083 8084f9f6 Manuel Franceschini
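# Name resolution sketch (hypothetical hostname): utils.HostInfo, as used in
# LURenameCluster.CheckPrereq above, resolves a name and exposes both the
# canonical name and the IP address:
#
#   hostinfo = utils.HostInfo("cluster.example.com")
#   hostinfo.name, hostinfo.ip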
def _RecursiveCheckIfLVMBased(disk):
1084 8084f9f6 Manuel Franceschini
  """Check if the given disk or its children are lvm-based.
1085 8084f9f6 Manuel Franceschini

1086 8084f9f6 Manuel Franceschini
  Args:
1087 8084f9f6 Manuel Franceschini
    disk: ganeti.objects.Disk object
1088 8084f9f6 Manuel Franceschini

1089 8084f9f6 Manuel Franceschini
  Returns:
1090 8084f9f6 Manuel Franceschini
    boolean indicating whether an LD_LV dev_type was found or not
1091 8084f9f6 Manuel Franceschini

1092 8084f9f6 Manuel Franceschini
  """
1093 8084f9f6 Manuel Franceschini
  if disk.children:
1094 8084f9f6 Manuel Franceschini
    for chdisk in disk.children:
1095 8084f9f6 Manuel Franceschini
      if _RecursiveCheckIfLVMBased(chdisk):
1096 8084f9f6 Manuel Franceschini
        return True
1097 8084f9f6 Manuel Franceschini
  return disk.dev_type == constants.LD_LV
1098 8084f9f6 Manuel Franceschini
1099 8084f9f6 Manuel Franceschini
1100 8084f9f6 Manuel Franceschini
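# Illustrative sketch (hypothetical disk objects, comments only): an LV disk
# is lvm-based by itself, and any disk whose children (recursively) contain
# one is reported as lvm-based too:
#
#   lv = objects.Disk(dev_type=constants.LD_LV, children=[])
#   parent = objects.Disk(dev_type="some-other-type", children=[lv, lv])
#   _RecursiveCheckIfLVMBased(lv)      # -> True
#   _RecursiveCheckIfLVMBased(parent)  # -> True, found via the children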
class LUSetClusterParams(LogicalUnit):
1101 8084f9f6 Manuel Franceschini
  """Change the parameters of the cluster.
1102 8084f9f6 Manuel Franceschini

1103 8084f9f6 Manuel Franceschini
  """
1104 8084f9f6 Manuel Franceschini
  HPATH = "cluster-modify"
1105 8084f9f6 Manuel Franceschini
  HTYPE = constants.HTYPE_CLUSTER
1106 8084f9f6 Manuel Franceschini
  _OP_REQP = []
1107 c53279cf Guido Trotter
  REQ_BGL = False
1108 c53279cf Guido Trotter
1109 c53279cf Guido Trotter
  def ExpandNames(self):
1110 c53279cf Guido Trotter
    # FIXME: in the future maybe other cluster params won't require checking on
1111 c53279cf Guido Trotter
    # all nodes to be modified.
1112 c53279cf Guido Trotter
    self.needed_locks = {
1113 c53279cf Guido Trotter
      locking.LEVEL_NODE: locking.ALL_SET,
1114 c53279cf Guido Trotter
    }
1115 c53279cf Guido Trotter
    self.share_locks[locking.LEVEL_NODE] = 1
1116 8084f9f6 Manuel Franceschini
1117 8084f9f6 Manuel Franceschini
  def BuildHooksEnv(self):
1118 8084f9f6 Manuel Franceschini
    """Build hooks env.
1119 8084f9f6 Manuel Franceschini

1120 8084f9f6 Manuel Franceschini
    """
1121 8084f9f6 Manuel Franceschini
    env = {
1122 d6a02168 Michael Hanselmann
      "OP_TARGET": self.cfg.GetClusterName(),
1123 8084f9f6 Manuel Franceschini
      "NEW_VG_NAME": self.op.vg_name,
1124 8084f9f6 Manuel Franceschini
      }
1125 d6a02168 Michael Hanselmann
    mn = self.cfg.GetMasterNode()
1126 8084f9f6 Manuel Franceschini
    return env, [mn], [mn]
1127 8084f9f6 Manuel Franceschini
1128 8084f9f6 Manuel Franceschini
  def CheckPrereq(self):
1129 8084f9f6 Manuel Franceschini
    """Check prerequisites.
1130 8084f9f6 Manuel Franceschini

1131 8084f9f6 Manuel Franceschini
    This checks whether the given params don't conflict and
1132 5f83e263 Iustin Pop
    if the given volume group is valid.
1133 8084f9f6 Manuel Franceschini

1134 8084f9f6 Manuel Franceschini
    """
1135 c53279cf Guido Trotter
    # FIXME: This only works because there is only one parameter that can be
1136 c53279cf Guido Trotter
    # changed or removed.
1137 8084f9f6 Manuel Franceschini
    if not self.op.vg_name:
1138 c53279cf Guido Trotter
      instances = self.cfg.GetAllInstancesInfo().values()
1139 8084f9f6 Manuel Franceschini
      for inst in instances:
1140 8084f9f6 Manuel Franceschini
        for disk in inst.disks:
1141 8084f9f6 Manuel Franceschini
          if _RecursiveCheckIfLVMBased(disk):
1142 8084f9f6 Manuel Franceschini
            raise errors.OpPrereqError("Cannot disable lvm storage while"
1143 8084f9f6 Manuel Franceschini
                                       " lvm-based instances exist")
1144 8084f9f6 Manuel Franceschini
1145 8084f9f6 Manuel Franceschini
    # if vg_name not None, checks given volume group on all nodes
1146 8084f9f6 Manuel Franceschini
    if self.op.vg_name:
1147 c53279cf Guido Trotter
      node_list = self.acquired_locks[locking.LEVEL_NODE]
1148 8084f9f6 Manuel Franceschini
      vglist = rpc.call_vg_list(node_list)
1149 8084f9f6 Manuel Franceschini
      for node in node_list:
1150 8d1a2a64 Michael Hanselmann
        vgstatus = utils.CheckVolumeGroupSize(vglist[node], self.op.vg_name,
1151 8d1a2a64 Michael Hanselmann
                                              constants.MIN_VG_SIZE)
1152 8084f9f6 Manuel Franceschini
        if vgstatus:
1153 8084f9f6 Manuel Franceschini
          raise errors.OpPrereqError("Error on node '%s': %s" %
1154 8084f9f6 Manuel Franceschini
                                     (node, vgstatus))
1155 8084f9f6 Manuel Franceschini
1156 8084f9f6 Manuel Franceschini
  def Exec(self, feedback_fn):
1157 8084f9f6 Manuel Franceschini
    """Change the parameters of the cluster.
1158 8084f9f6 Manuel Franceschini

1159 8084f9f6 Manuel Franceschini
    """
1160 8084f9f6 Manuel Franceschini
    if self.op.vg_name != self.cfg.GetVGName():
1161 8084f9f6 Manuel Franceschini
      self.cfg.SetVGName(self.op.vg_name)
1162 8084f9f6 Manuel Franceschini
    else:
1163 8084f9f6 Manuel Franceschini
      feedback_fn("Cluster LVM configuration already in desired"
1164 8084f9f6 Manuel Franceschini
                  " state, not changing")
1165 8084f9f6 Manuel Franceschini
1166 8084f9f6 Manuel Franceschini
1167 5bfac263 Iustin Pop
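# Usage sketch for the check in LUSetClusterParams.CheckPrereq (hypothetical
# values, and assuming the per-node listing maps volume group names to their
# sizes): utils.CheckVolumeGroupSize returns an error string on failure and
# a false value when the group is present and large enough:
#
#   vgstatus = utils.CheckVolumeGroupSize({"xenvg": 20480}, "xenvg",
#                                         constants.MIN_VG_SIZE)
#   assert not vgstatus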
def _WaitForSync(cfgw, instance, proc, oneshot=False, unlock=False):
1168 a8083063 Iustin Pop
  """Sleep and poll for an instance's disk to sync.
1169 a8083063 Iustin Pop

1170 a8083063 Iustin Pop
  """
1171 a8083063 Iustin Pop
  if not instance.disks:
1172 a8083063 Iustin Pop
    return True
1173 a8083063 Iustin Pop
1174 a8083063 Iustin Pop
  if not oneshot:
1175 5bfac263 Iustin Pop
    proc.LogInfo("Waiting for instance %s to sync disks." % instance.name)
1176 a8083063 Iustin Pop
1177 a8083063 Iustin Pop
  node = instance.primary_node
1178 a8083063 Iustin Pop
1179 a8083063 Iustin Pop
  for dev in instance.disks:
1180 a8083063 Iustin Pop
    cfgw.SetDiskID(dev, node)
1181 a8083063 Iustin Pop
1182 a8083063 Iustin Pop
  retries = 0
1183 a8083063 Iustin Pop
  while True:
1184 a8083063 Iustin Pop
    max_time = 0
1185 a8083063 Iustin Pop
    done = True
1186 a8083063 Iustin Pop
    cumul_degraded = False
1187 a8083063 Iustin Pop
    rstats = rpc.call_blockdev_getmirrorstatus(node, instance.disks)
1188 a8083063 Iustin Pop
    if not rstats:
1189 5bfac263 Iustin Pop
      proc.LogWarning("Can't get any data from node %s" % node)
1190 a8083063 Iustin Pop
      retries += 1
1191 a8083063 Iustin Pop
      if retries >= 10:
1192 3ecf6786 Iustin Pop
        raise errors.RemoteError("Can't contact node %s for mirror data,"
1193 3ecf6786 Iustin Pop
                                 " aborting." % node)
1194 a8083063 Iustin Pop
      time.sleep(6)
1195 a8083063 Iustin Pop
      continue
1196 a8083063 Iustin Pop
    retries = 0
1197 a8083063 Iustin Pop
    for i in range(len(rstats)):
1198 a8083063 Iustin Pop
      mstat = rstats[i]
1199 a8083063 Iustin Pop
      if mstat is None:
1200 5bfac263 Iustin Pop
        proc.LogWarning("Can't compute data for node %s/%s" %
1201 a8083063 Iustin Pop
                        (node, instance.disks[i].iv_name))
1202 a8083063 Iustin Pop
        continue
1203 0834c866 Iustin Pop
      # we ignore the ldisk parameter
1204 0834c866 Iustin Pop
      perc_done, est_time, is_degraded, _ = mstat
1205 a8083063 Iustin Pop
      cumul_degraded = cumul_degraded or (is_degraded and perc_done is None)
1206 a8083063 Iustin Pop
      if perc_done is not None:
1207 a8083063 Iustin Pop
        done = False
1208 a8083063 Iustin Pop
        if est_time is not None:
1209 a8083063 Iustin Pop
          rem_time = "%d estimated seconds remaining" % est_time
1210 a8083063 Iustin Pop
          max_time = est_time
1211 a8083063 Iustin Pop
        else:
1212 a8083063 Iustin Pop
          rem_time = "no time estimate"
1213 5bfac263 Iustin Pop
        proc.LogInfo("- device %s: %5.2f%% done, %s" %
1214 5bfac263 Iustin Pop
                     (instance.disks[i].iv_name, perc_done, rem_time))
1215 a8083063 Iustin Pop
    if done or oneshot:
1216 a8083063 Iustin Pop
      break
1217 a8083063 Iustin Pop
1218 d4fa5c23 Iustin Pop
    time.sleep(min(60, max_time))
1219 a8083063 Iustin Pop
1220 a8083063 Iustin Pop
  if done:
1221 5bfac263 Iustin Pop
    proc.LogInfo("Instance %s's disks are in sync." % instance.name)
1222 a8083063 Iustin Pop
  return not cumul_degraded
1223 a8083063 Iustin Pop
1224 a8083063 Iustin Pop
1225 0834c866 Iustin Pop
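# Minimal sketch of the polling pattern used by _WaitForSync (standalone,
# with a hypothetical poll_fn): poll, honour the reported time estimate, and
# never sleep more than 60 seconds between rounds:
#
#   def _wait_until_synced(poll_fn):
#     while True:
#       done, est_time = poll_fn()
#       if done:
#         return True
#       time.sleep(min(60, est_time or 60))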
def _CheckDiskConsistency(cfgw, dev, node, on_primary, ldisk=False):
1226 a8083063 Iustin Pop
  """Check that mirrors are not degraded.
1227 a8083063 Iustin Pop

1228 0834c866 Iustin Pop
  The ldisk parameter, if True, will change the test from the
1229 0834c866 Iustin Pop
  is_degraded attribute (which represents overall non-ok status for
1230 0834c866 Iustin Pop
  the device(s)) to the ldisk (representing the local storage status).
1231 0834c866 Iustin Pop

1232 a8083063 Iustin Pop
  """
1233 a8083063 Iustin Pop
  cfgw.SetDiskID(dev, node)
1234 0834c866 Iustin Pop
  if ldisk:
1235 0834c866 Iustin Pop
    idx = 6
1236 0834c866 Iustin Pop
  else:
1237 0834c866 Iustin Pop
    idx = 5
1238 a8083063 Iustin Pop
1239 a8083063 Iustin Pop
  result = True
1240 a8083063 Iustin Pop
  if on_primary or dev.AssembleOnSecondary():
1241 a8083063 Iustin Pop
    rstats = rpc.call_blockdev_find(node, dev)
1242 a8083063 Iustin Pop
    if not rstats:
1243 aa9d0c32 Guido Trotter
      logger.ToStderr("Node %s: Disk degraded, not found or node down" % node)
1244 a8083063 Iustin Pop
      result = False
1245 a8083063 Iustin Pop
    else:
1246 0834c866 Iustin Pop
      result = result and (not rstats[idx])
1247 a8083063 Iustin Pop
  if dev.children:
1248 a8083063 Iustin Pop
    for child in dev.children:
1249 a8083063 Iustin Pop
      result = result and _CheckDiskConsistency(cfgw, child, node, on_primary)
1250 a8083063 Iustin Pop
1251 a8083063 Iustin Pop
  return result
1252 a8083063 Iustin Pop
1253 a8083063 Iustin Pop
1254 a8083063 Iustin Pop
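# Note (based only on the indexes used above): in the status tuple returned
# by rpc.call_blockdev_find, position 5 is read as the overall is_degraded
# flag and position 6 as the local-disk (ldisk) status, e.g.
#
#   rstats = rpc.call_blockdev_find(node, dev)  # hypothetical call site
#   overall_degraded, local_degraded = rstats[5], rstats[6]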
class LUDiagnoseOS(NoHooksLU):
1255 a8083063 Iustin Pop
  """Logical unit for OS diagnose/query.
1256 a8083063 Iustin Pop

1257 a8083063 Iustin Pop
  """
1258 1f9430d6 Iustin Pop
  _OP_REQP = ["output_fields", "names"]
1259 6bf01bbb Guido Trotter
  REQ_BGL = False
1260 a8083063 Iustin Pop
1261 6bf01bbb Guido Trotter
  def ExpandNames(self):
1262 1f9430d6 Iustin Pop
    if self.op.names:
1263 1f9430d6 Iustin Pop
      raise errors.OpPrereqError("Selective OS query not supported")
1264 1f9430d6 Iustin Pop
1265 1f9430d6 Iustin Pop
    self.dynamic_fields = frozenset(["name", "valid", "node_status"])
1266 1f9430d6 Iustin Pop
    _CheckOutputFields(static=[],
1267 1f9430d6 Iustin Pop
                       dynamic=self.dynamic_fields,
1268 1f9430d6 Iustin Pop
                       selected=self.op.output_fields)
1269 1f9430d6 Iustin Pop
1270 6bf01bbb Guido Trotter
    # Lock all nodes, in shared mode
1271 6bf01bbb Guido Trotter
    self.needed_locks = {}
1272 6bf01bbb Guido Trotter
    self.share_locks[locking.LEVEL_NODE] = 1
1273 e310b019 Guido Trotter
    self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
1274 6bf01bbb Guido Trotter
1275 6bf01bbb Guido Trotter
  def CheckPrereq(self):
1276 6bf01bbb Guido Trotter
    """Check prerequisites.
1277 6bf01bbb Guido Trotter

1278 6bf01bbb Guido Trotter
    """
1279 6bf01bbb Guido Trotter
1280 1f9430d6 Iustin Pop
  @staticmethod
1281 1f9430d6 Iustin Pop
  def _DiagnoseByOS(node_list, rlist):
1282 1f9430d6 Iustin Pop
    """Remaps a per-node return list into an a per-os per-node dictionary
1283 1f9430d6 Iustin Pop

1284 1f9430d6 Iustin Pop
      Args:
1285 1f9430d6 Iustin Pop
        node_list: a list with the names of all nodes
1286 1f9430d6 Iustin Pop
        rlist: a map with node names as keys and OS objects as values
1287 1f9430d6 Iustin Pop

1288 1f9430d6 Iustin Pop
      Returns:
1289 1f9430d6 Iustin Pop
        map: a map with osnames as keys and as value another map, with
1290 1f9430d6 Iustin Pop
             nodes as
1291 1f9430d6 Iustin Pop
             keys and lists of OS objects as values
1292 1f9430d6 Iustin Pop
             e.g. {"debian-etch": {"node1": [<object>,...],
1293 1f9430d6 Iustin Pop
                                   "node2": [<object>,]}
1294 1f9430d6 Iustin Pop
                  }
1295 1f9430d6 Iustin Pop

1296 1f9430d6 Iustin Pop
    """
1297 1f9430d6 Iustin Pop
    all_os = {}
1298 1f9430d6 Iustin Pop
    for node_name, nr in rlist.iteritems():
1299 1f9430d6 Iustin Pop
      if not nr:
1300 1f9430d6 Iustin Pop
        continue
1301 b4de68a9 Iustin Pop
      for os_obj in nr:
1302 b4de68a9 Iustin Pop
        if os_obj.name not in all_os:
1303 1f9430d6 Iustin Pop
          # build a list of nodes for this os containing empty lists
1304 1f9430d6 Iustin Pop
          # for each node in node_list
1305 b4de68a9 Iustin Pop
          all_os[os_obj.name] = {}
1306 1f9430d6 Iustin Pop
          for nname in node_list:
1307 b4de68a9 Iustin Pop
            all_os[os_obj.name][nname] = []
1308 b4de68a9 Iustin Pop
        all_os[os_obj.name][node_name].append(os_obj)
1309 1f9430d6 Iustin Pop
    return all_os
1310 a8083063 Iustin Pop
1311 a8083063 Iustin Pop
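  # Example of the remapping done by _DiagnoseByOS (hypothetical data): with
  # node_list == ["node1", "node2"] and
  #   rlist == {"node1": [os_a, os_b], "node2": [os_a]}
  # (where os_a.name == "a" and os_b.name == "b"), the result is
  #   {"a": {"node1": [os_a], "node2": [os_a]},
  #    "b": {"node1": [os_b], "node2": []}}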
  def Exec(self, feedback_fn):
1312 a8083063 Iustin Pop
    """Compute the list of OSes.
1313 a8083063 Iustin Pop

1314 a8083063 Iustin Pop
    """
1315 6bf01bbb Guido Trotter
    node_list = self.acquired_locks[locking.LEVEL_NODE]
1316 a8083063 Iustin Pop
    node_data = rpc.call_os_diagnose(node_list)
1317 a8083063 Iustin Pop
    if node_data == False:
1318 3ecf6786 Iustin Pop
      raise errors.OpExecError("Can't gather the list of OSes")
1319 1f9430d6 Iustin Pop
    pol = self._DiagnoseByOS(node_list, node_data)
1320 1f9430d6 Iustin Pop
    output = []
1321 1f9430d6 Iustin Pop
    for os_name, os_data in pol.iteritems():
1322 1f9430d6 Iustin Pop
      row = []
1323 1f9430d6 Iustin Pop
      for field in self.op.output_fields:
1324 1f9430d6 Iustin Pop
        if field == "name":
1325 1f9430d6 Iustin Pop
          val = os_name
1326 1f9430d6 Iustin Pop
        elif field == "valid":
1327 1f9430d6 Iustin Pop
          val = utils.all([osl and osl[0] for osl in os_data.values()])
1328 1f9430d6 Iustin Pop
        elif field == "node_status":
1329 1f9430d6 Iustin Pop
          val = {}
1330 1f9430d6 Iustin Pop
          for node_name, nos_list in os_data.iteritems():
1331 1f9430d6 Iustin Pop
            val[node_name] = [(v.status, v.path) for v in nos_list]
1332 1f9430d6 Iustin Pop
        else:
1333 1f9430d6 Iustin Pop
          raise errors.ParameterError(field)
1334 1f9430d6 Iustin Pop
        row.append(val)
1335 1f9430d6 Iustin Pop
      output.append(row)
1336 1f9430d6 Iustin Pop
1337 1f9430d6 Iustin Pop
    return output
1338 a8083063 Iustin Pop
1339 a8083063 Iustin Pop
1340 a8083063 Iustin Pop
class LURemoveNode(LogicalUnit):
1341 a8083063 Iustin Pop
  """Logical unit for removing a node.
1342 a8083063 Iustin Pop

1343 a8083063 Iustin Pop
  """
1344 a8083063 Iustin Pop
  HPATH = "node-remove"
1345 a8083063 Iustin Pop
  HTYPE = constants.HTYPE_NODE
1346 a8083063 Iustin Pop
  _OP_REQP = ["node_name"]
1347 a8083063 Iustin Pop
1348 a8083063 Iustin Pop
  def BuildHooksEnv(self):
1349 a8083063 Iustin Pop
    """Build hooks env.
1350 a8083063 Iustin Pop

1351 a8083063 Iustin Pop
    This doesn't run on the target node in the pre phase as a failed
1352 d08869ee Guido Trotter
    node would then be impossible to remove.
1353 a8083063 Iustin Pop

1354 a8083063 Iustin Pop
    """
1355 396e1b78 Michael Hanselmann
    env = {
1356 0e137c28 Iustin Pop
      "OP_TARGET": self.op.node_name,
1357 396e1b78 Michael Hanselmann
      "NODE_NAME": self.op.node_name,
1358 396e1b78 Michael Hanselmann
      }
1359 a8083063 Iustin Pop
    all_nodes = self.cfg.GetNodeList()
1360 a8083063 Iustin Pop
    all_nodes.remove(self.op.node_name)
1361 396e1b78 Michael Hanselmann
    return env, all_nodes, all_nodes
1362 a8083063 Iustin Pop
1363 a8083063 Iustin Pop
  def CheckPrereq(self):
1364 a8083063 Iustin Pop
    """Check prerequisites.
1365 a8083063 Iustin Pop

1366 a8083063 Iustin Pop
    This checks:
1367 a8083063 Iustin Pop
     - the node exists in the configuration
1368 a8083063 Iustin Pop
     - it does not have primary or secondary instances
1369 a8083063 Iustin Pop
     - it's not the master
1370 a8083063 Iustin Pop

1371 a8083063 Iustin Pop
    Any errors are signalled by raising errors.OpPrereqError.
1372 a8083063 Iustin Pop

1373 a8083063 Iustin Pop
    """
1374 a8083063 Iustin Pop
    node = self.cfg.GetNodeInfo(self.cfg.ExpandNodeName(self.op.node_name))
1375 a8083063 Iustin Pop
    if node is None:
1376 a02bc76e Iustin Pop
      raise errors.OpPrereqError("Node '%s' is unknown." % self.op.node_name)
1377 a8083063 Iustin Pop
1378 a8083063 Iustin Pop
    instance_list = self.cfg.GetInstanceList()
1379 a8083063 Iustin Pop
1380 d6a02168 Michael Hanselmann
    masternode = self.cfg.GetMasterNode()
1381 a8083063 Iustin Pop
    if node.name == masternode:
1382 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("Node is the master node,"
1383 3ecf6786 Iustin Pop
                                 " you need to failover first.")
1384 a8083063 Iustin Pop
1385 a8083063 Iustin Pop
    for instance_name in instance_list:
1386 a8083063 Iustin Pop
      instance = self.cfg.GetInstanceInfo(instance_name)
1387 a8083063 Iustin Pop
      if node.name == instance.primary_node:
1388 3ecf6786 Iustin Pop
        raise errors.OpPrereqError("Instance %s still running on the node,"
1389 3ecf6786 Iustin Pop
                                   " please remove first." % instance_name)
1390 a8083063 Iustin Pop
      if node.name in instance.secondary_nodes:
1391 3ecf6786 Iustin Pop
        raise errors.OpPrereqError("Instance %s has node as a secondary,"
1392 3ecf6786 Iustin Pop
                                   " please remove first." % instance_name)
1393 a8083063 Iustin Pop
    self.op.node_name = node.name
1394 a8083063 Iustin Pop
    self.node = node
1395 a8083063 Iustin Pop
1396 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
1397 a8083063 Iustin Pop
    """Removes the node from the cluster.
1398 a8083063 Iustin Pop

1399 a8083063 Iustin Pop
    """
1400 a8083063 Iustin Pop
    node = self.node
1401 a8083063 Iustin Pop
    logger.Info("stopping the node daemon and removing configs from node %s" %
1402 a8083063 Iustin Pop
                node.name)
1403 a8083063 Iustin Pop
1404 d8470559 Michael Hanselmann
    self.context.RemoveNode(node.name)
1405 a8083063 Iustin Pop
1406 d8470559 Michael Hanselmann
    rpc.call_node_leave_cluster(node.name)
1407 c8a0948f Michael Hanselmann
1408 a8083063 Iustin Pop
1409 a8083063 Iustin Pop
class LUQueryNodes(NoHooksLU):
1410 a8083063 Iustin Pop
  """Logical unit for querying nodes.
1411 a8083063 Iustin Pop

1412 a8083063 Iustin Pop
  """
1413 246e180a Iustin Pop
  _OP_REQP = ["output_fields", "names"]
1414 35705d8f Guido Trotter
  REQ_BGL = False
1415 a8083063 Iustin Pop
1416 35705d8f Guido Trotter
  def ExpandNames(self):
1417 e8a4c138 Iustin Pop
    self.dynamic_fields = frozenset([
1418 e8a4c138 Iustin Pop
      "dtotal", "dfree",
1419 e8a4c138 Iustin Pop
      "mtotal", "mnode", "mfree",
1420 e8a4c138 Iustin Pop
      "bootid",
1421 e8a4c138 Iustin Pop
      "ctotal",
1422 e8a4c138 Iustin Pop
      ])
1423 a8083063 Iustin Pop
1424 c8d8b4c8 Iustin Pop
    self.static_fields = frozenset([
1425 c8d8b4c8 Iustin Pop
      "name", "pinst_cnt", "sinst_cnt",
1426 c8d8b4c8 Iustin Pop
      "pinst_list", "sinst_list",
1427 c8d8b4c8 Iustin Pop
      "pip", "sip", "tags",
1428 38d7239a Iustin Pop
      "serial_no",
1429 c8d8b4c8 Iustin Pop
      ])
1430 c8d8b4c8 Iustin Pop
1431 c8d8b4c8 Iustin Pop
    _CheckOutputFields(static=self.static_fields,
1432 dcb93971 Michael Hanselmann
                       dynamic=self.dynamic_fields,
1433 dcb93971 Michael Hanselmann
                       selected=self.op.output_fields)
1434 a8083063 Iustin Pop
1435 35705d8f Guido Trotter
    self.needed_locks = {}
1436 35705d8f Guido Trotter
    self.share_locks[locking.LEVEL_NODE] = 1
1437 c8d8b4c8 Iustin Pop
1438 c8d8b4c8 Iustin Pop
    if self.op.names:
1439 c8d8b4c8 Iustin Pop
      self.wanted = _GetWantedNodes(self, self.op.names)
1440 35705d8f Guido Trotter
    else:
1441 c8d8b4c8 Iustin Pop
      self.wanted = locking.ALL_SET
1442 c8d8b4c8 Iustin Pop
1443 c8d8b4c8 Iustin Pop
    self.do_locking = not self.static_fields.issuperset(self.op.output_fields)
1444 c8d8b4c8 Iustin Pop
    if self.do_locking:
1445 c8d8b4c8 Iustin Pop
      # if we don't request only static fields, we need to lock the nodes
1446 c8d8b4c8 Iustin Pop
      self.needed_locks[locking.LEVEL_NODE] = self.wanted
1447 c8d8b4c8 Iustin Pop
1448 35705d8f Guido Trotter
1449 35705d8f Guido Trotter
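  # Locking example (hypothetical field selections): requesting only static
  # data such as ["name", "pip", "sip"] leaves do_locking False, so no node
  # locks are taken, while including a dynamic field such as "mfree" or
  # "dtotal" forces do_locking True and the wanted nodes are locked (shared):
  #
  #   frozenset(["name", "pip"]).issubset(self.static_fields)   # no locking
  #   "mfree" in self.dynamic_fields                             # locking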
  def CheckPrereq(self):
1450 35705d8f Guido Trotter
    """Check prerequisites.
1451 35705d8f Guido Trotter

1452 35705d8f Guido Trotter
    """
1453 c8d8b4c8 Iustin Pop
    # The validation of the node list is done in the _GetWantedNodes,
1454 c8d8b4c8 Iustin Pop
    # if non empty, and if empty, there's no validation to do
1455 c8d8b4c8 Iustin Pop
    pass
1456 a8083063 Iustin Pop
1457 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
1458 a8083063 Iustin Pop
    """Computes the list of nodes and their attributes.
1459 a8083063 Iustin Pop

1460 a8083063 Iustin Pop
    """
1461 c8d8b4c8 Iustin Pop
    all_info = self.cfg.GetAllNodesInfo()
1462 c8d8b4c8 Iustin Pop
    if self.do_locking:
1463 c8d8b4c8 Iustin Pop
      nodenames = self.acquired_locks[locking.LEVEL_NODE]
1464 3fa93523 Guido Trotter
    elif self.wanted != locking.ALL_SET:
1465 3fa93523 Guido Trotter
      nodenames = self.wanted
1466 3fa93523 Guido Trotter
      missing = set(nodenames).difference(all_info.keys())
1467 3fa93523 Guido Trotter
      if missing:
1468 7b3a8fb5 Iustin Pop
        raise errors.OpExecError(
1469 3fa93523 Guido Trotter
          "Some nodes were removed before retrieving their data: %s" % missing)
1470 c8d8b4c8 Iustin Pop
    else:
1471 c8d8b4c8 Iustin Pop
      nodenames = all_info.keys()
1472 c8d8b4c8 Iustin Pop
    nodelist = [all_info[name] for name in nodenames]
1473 a8083063 Iustin Pop
1474 a8083063 Iustin Pop
    # begin data gathering
1475 a8083063 Iustin Pop
1476 a8083063 Iustin Pop
    if self.dynamic_fields.intersection(self.op.output_fields):
1477 a8083063 Iustin Pop
      live_data = {}
1478 e69d05fd Iustin Pop
      node_data = rpc.call_node_info(nodenames, self.cfg.GetVGName(),
1479 e69d05fd Iustin Pop
                                     self.cfg.GetHypervisorType())
1480 a8083063 Iustin Pop
      for name in nodenames:
1481 a8083063 Iustin Pop
        nodeinfo = node_data.get(name, None)
1482 a8083063 Iustin Pop
        if nodeinfo:
1483 a8083063 Iustin Pop
          live_data[name] = {
1484 a8083063 Iustin Pop
            "mtotal": utils.TryConvert(int, nodeinfo['memory_total']),
1485 a8083063 Iustin Pop
            "mnode": utils.TryConvert(int, nodeinfo['memory_dom0']),
1486 a8083063 Iustin Pop
            "mfree": utils.TryConvert(int, nodeinfo['memory_free']),
1487 a8083063 Iustin Pop
            "dtotal": utils.TryConvert(int, nodeinfo['vg_size']),
1488 a8083063 Iustin Pop
            "dfree": utils.TryConvert(int, nodeinfo['vg_free']),
1489 e8a4c138 Iustin Pop
            "ctotal": utils.TryConvert(int, nodeinfo['cpu_total']),
1490 3ef10550 Michael Hanselmann
            "bootid": nodeinfo['bootid'],
1491 a8083063 Iustin Pop
            }
1492 a8083063 Iustin Pop
        else:
1493 a8083063 Iustin Pop
          live_data[name] = {}
1494 a8083063 Iustin Pop
    else:
1495 a8083063 Iustin Pop
      live_data = dict.fromkeys(nodenames, {})
1496 a8083063 Iustin Pop
1497 ec223efb Iustin Pop
    node_to_primary = dict([(name, set()) for name in nodenames])
1498 ec223efb Iustin Pop
    node_to_secondary = dict([(name, set()) for name in nodenames])
1499 a8083063 Iustin Pop
1500 ec223efb Iustin Pop
    inst_fields = frozenset(("pinst_cnt", "pinst_list",
1501 ec223efb Iustin Pop
                             "sinst_cnt", "sinst_list"))
1502 ec223efb Iustin Pop
    if inst_fields & frozenset(self.op.output_fields):
1503 a8083063 Iustin Pop
      instancelist = self.cfg.GetInstanceList()
1504 a8083063 Iustin Pop
1505 ec223efb Iustin Pop
      for instance_name in instancelist:
1506 ec223efb Iustin Pop
        inst = self.cfg.GetInstanceInfo(instance_name)
1507 ec223efb Iustin Pop
        if inst.primary_node in node_to_primary:
1508 ec223efb Iustin Pop
          node_to_primary[inst.primary_node].add(inst.name)
1509 ec223efb Iustin Pop
        for secnode in inst.secondary_nodes:
1510 ec223efb Iustin Pop
          if secnode in node_to_secondary:
1511 ec223efb Iustin Pop
            node_to_secondary[secnode].add(inst.name)
1512 a8083063 Iustin Pop
1513 a8083063 Iustin Pop
    # end data gathering
1514 a8083063 Iustin Pop
1515 a8083063 Iustin Pop
    output = []
1516 a8083063 Iustin Pop
    for node in nodelist:
1517 a8083063 Iustin Pop
      node_output = []
1518 a8083063 Iustin Pop
      for field in self.op.output_fields:
1519 a8083063 Iustin Pop
        if field == "name":
1520 a8083063 Iustin Pop
          val = node.name
1521 ec223efb Iustin Pop
        elif field == "pinst_list":
1522 ec223efb Iustin Pop
          val = list(node_to_primary[node.name])
1523 ec223efb Iustin Pop
        elif field == "sinst_list":
1524 ec223efb Iustin Pop
          val = list(node_to_secondary[node.name])
1525 ec223efb Iustin Pop
        elif field == "pinst_cnt":
1526 ec223efb Iustin Pop
          val = len(node_to_primary[node.name])
1527 ec223efb Iustin Pop
        elif field == "sinst_cnt":
1528 ec223efb Iustin Pop
          val = len(node_to_secondary[node.name])
1529 a8083063 Iustin Pop
        elif field == "pip":
1530 a8083063 Iustin Pop
          val = node.primary_ip
1531 a8083063 Iustin Pop
        elif field == "sip":
1532 a8083063 Iustin Pop
          val = node.secondary_ip
1533 130a6a6f Iustin Pop
        elif field == "tags":
1534 130a6a6f Iustin Pop
          val = list(node.GetTags())
1535 38d7239a Iustin Pop
        elif field == "serial_no":
1536 38d7239a Iustin Pop
          val = node.serial_no
1537 a8083063 Iustin Pop
        elif field in self.dynamic_fields:
1538 ec223efb Iustin Pop
          val = live_data[node.name].get(field, None)
1539 a8083063 Iustin Pop
        else:
1540 3ecf6786 Iustin Pop
          raise errors.ParameterError(field)
1541 a8083063 Iustin Pop
        node_output.append(val)
1542 a8083063 Iustin Pop
      output.append(node_output)
1543 a8083063 Iustin Pop
1544 a8083063 Iustin Pop
    return output
1545 a8083063 Iustin Pop
1546 a8083063 Iustin Pop
1547 dcb93971 Michael Hanselmann
class LUQueryNodeVolumes(NoHooksLU):
1548 dcb93971 Michael Hanselmann
  """Logical unit for getting volumes on node(s).
1549 dcb93971 Michael Hanselmann

1550 dcb93971 Michael Hanselmann
  """
1551 dcb93971 Michael Hanselmann
  _OP_REQP = ["nodes", "output_fields"]
1552 21a15682 Guido Trotter
  REQ_BGL = False
1553 21a15682 Guido Trotter
1554 21a15682 Guido Trotter
  def ExpandNames(self):
1555 21a15682 Guido Trotter
    _CheckOutputFields(static=["node"],
1556 21a15682 Guido Trotter
                       dynamic=["phys", "vg", "name", "size", "instance"],
1557 21a15682 Guido Trotter
                       selected=self.op.output_fields)
1558 21a15682 Guido Trotter
1559 21a15682 Guido Trotter
    self.needed_locks = {}
1560 21a15682 Guido Trotter
    self.share_locks[locking.LEVEL_NODE] = 1
1561 21a15682 Guido Trotter
    if not self.op.nodes:
1562 e310b019 Guido Trotter
      self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
1563 21a15682 Guido Trotter
    else:
1564 21a15682 Guido Trotter
      self.needed_locks[locking.LEVEL_NODE] = \
1565 21a15682 Guido Trotter
        _GetWantedNodes(self, self.op.nodes)
1566 dcb93971 Michael Hanselmann
1567 dcb93971 Michael Hanselmann
  def CheckPrereq(self):
1568 dcb93971 Michael Hanselmann
    """Check prerequisites.
1569 dcb93971 Michael Hanselmann

1570 dcb93971 Michael Hanselmann
    This checks that the fields required are valid output fields.
1571 dcb93971 Michael Hanselmann

1572 dcb93971 Michael Hanselmann
    """
1573 21a15682 Guido Trotter
    self.nodes = self.acquired_locks[locking.LEVEL_NODE]
1574 dcb93971 Michael Hanselmann
1575 dcb93971 Michael Hanselmann
  def Exec(self, feedback_fn):
1576 dcb93971 Michael Hanselmann
    """Computes the list of nodes and their attributes.
1577 dcb93971 Michael Hanselmann

1578 dcb93971 Michael Hanselmann
    """
1579 a7ba5e53 Iustin Pop
    nodenames = self.nodes
1580 dcb93971 Michael Hanselmann
    volumes = rpc.call_node_volumes(nodenames)
1581 dcb93971 Michael Hanselmann
1582 dcb93971 Michael Hanselmann
    ilist = [self.cfg.GetInstanceInfo(iname) for iname
1583 dcb93971 Michael Hanselmann
             in self.cfg.GetInstanceList()]
1584 dcb93971 Michael Hanselmann
1585 dcb93971 Michael Hanselmann
    lv_by_node = dict([(inst, inst.MapLVsByNode()) for inst in ilist])
1586 dcb93971 Michael Hanselmann
1587 dcb93971 Michael Hanselmann
    output = []
1588 dcb93971 Michael Hanselmann
    for node in nodenames:
1589 37d19eb2 Michael Hanselmann
      if node not in volumes or not volumes[node]:
1590 37d19eb2 Michael Hanselmann
        continue
1591 37d19eb2 Michael Hanselmann
1592 dcb93971 Michael Hanselmann
      node_vols = volumes[node][:]
1593 dcb93971 Michael Hanselmann
      node_vols.sort(key=lambda vol: vol['dev'])
1594 dcb93971 Michael Hanselmann
1595 dcb93971 Michael Hanselmann
      for vol in node_vols:
1596 dcb93971 Michael Hanselmann
        node_output = []
1597 dcb93971 Michael Hanselmann
        for field in self.op.output_fields:
1598 dcb93971 Michael Hanselmann
          if field == "node":
1599 dcb93971 Michael Hanselmann
            val = node
1600 dcb93971 Michael Hanselmann
          elif field == "phys":
1601 dcb93971 Michael Hanselmann
            val = vol['dev']
1602 dcb93971 Michael Hanselmann
          elif field == "vg":
1603 dcb93971 Michael Hanselmann
            val = vol['vg']
1604 dcb93971 Michael Hanselmann
          elif field == "name":
1605 dcb93971 Michael Hanselmann
            val = vol['name']
1606 dcb93971 Michael Hanselmann
          elif field == "size":
1607 dcb93971 Michael Hanselmann
            val = int(float(vol['size']))
1608 dcb93971 Michael Hanselmann
          elif field == "instance":
1609 dcb93971 Michael Hanselmann
            for inst in ilist:
1610 dcb93971 Michael Hanselmann
              if node not in lv_by_node[inst]:
1611 dcb93971 Michael Hanselmann
                continue
1612 dcb93971 Michael Hanselmann
              if vol['name'] in lv_by_node[inst][node]:
1613 dcb93971 Michael Hanselmann
                val = inst.name
1614 dcb93971 Michael Hanselmann
                break
1615 dcb93971 Michael Hanselmann
            else:
1616 dcb93971 Michael Hanselmann
              val = '-'
1617 dcb93971 Michael Hanselmann
          else:
1618 3ecf6786 Iustin Pop
            raise errors.ParameterError(field)
1619 dcb93971 Michael Hanselmann
          node_output.append(str(val))
1620 dcb93971 Michael Hanselmann
1621 dcb93971 Michael Hanselmann
        output.append(node_output)
1622 dcb93971 Michael Hanselmann
1623 dcb93971 Michael Hanselmann
    return output
1624 dcb93971 Michael Hanselmann
1625 dcb93971 Michael Hanselmann
1626 a8083063 Iustin Pop
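# Illustrative sketch (hypothetical values): rpc.call_node_volumes returns,
# per node, a list of volume dictionaries carrying the keys consumed above:
#
#   volumes = {"node1.example.com": [
#     {"dev": "/dev/sda2", "vg": "xenvg", "name": "example.lv",
#      "size": "10240.00"},
#   ]}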
class LUAddNode(LogicalUnit):
1627 a8083063 Iustin Pop
  """Logical unit for adding node to the cluster.
1628 a8083063 Iustin Pop

1629 a8083063 Iustin Pop
  """
1630 a8083063 Iustin Pop
  HPATH = "node-add"
1631 a8083063 Iustin Pop
  HTYPE = constants.HTYPE_NODE
1632 a8083063 Iustin Pop
  _OP_REQP = ["node_name"]
1633 a8083063 Iustin Pop
1634 a8083063 Iustin Pop
  def BuildHooksEnv(self):
1635 a8083063 Iustin Pop
    """Build hooks env.
1636 a8083063 Iustin Pop

1637 a8083063 Iustin Pop
    This will run on all nodes before, and on all nodes + the new node after.
1638 a8083063 Iustin Pop

1639 a8083063 Iustin Pop
    """
1640 a8083063 Iustin Pop
    env = {
1641 0e137c28 Iustin Pop
      "OP_TARGET": self.op.node_name,
1642 a8083063 Iustin Pop
      "NODE_NAME": self.op.node_name,
1643 a8083063 Iustin Pop
      "NODE_PIP": self.op.primary_ip,
1644 a8083063 Iustin Pop
      "NODE_SIP": self.op.secondary_ip,
1645 a8083063 Iustin Pop
      }
1646 a8083063 Iustin Pop
    nodes_0 = self.cfg.GetNodeList()
1647 a8083063 Iustin Pop
    nodes_1 = nodes_0 + [self.op.node_name, ]
1648 a8083063 Iustin Pop
    return env, nodes_0, nodes_1
1649 a8083063 Iustin Pop
1650 a8083063 Iustin Pop
  def CheckPrereq(self):
1651 a8083063 Iustin Pop
    """Check prerequisites.
1652 a8083063 Iustin Pop

1653 a8083063 Iustin Pop
    This checks:
1654 a8083063 Iustin Pop
     - the new node is not already in the config
1655 a8083063 Iustin Pop
     - it is resolvable
1656 a8083063 Iustin Pop
     - its parameters (single/dual homed) match the cluster
1657 a8083063 Iustin Pop

1658 a8083063 Iustin Pop
    Any errors are signalled by raising errors.OpPrereqError.
1659 a8083063 Iustin Pop

1660 a8083063 Iustin Pop
    """
1661 a8083063 Iustin Pop
    node_name = self.op.node_name
1662 a8083063 Iustin Pop
    cfg = self.cfg
1663 a8083063 Iustin Pop
1664 89e1fc26 Iustin Pop
    dns_data = utils.HostInfo(node_name)
1665 a8083063 Iustin Pop
1666 bcf043c9 Iustin Pop
    node = dns_data.name
1667 bcf043c9 Iustin Pop
    primary_ip = self.op.primary_ip = dns_data.ip
1668 a8083063 Iustin Pop
    secondary_ip = getattr(self.op, "secondary_ip", None)
1669 a8083063 Iustin Pop
    if secondary_ip is None:
1670 a8083063 Iustin Pop
      secondary_ip = primary_ip
1671 a8083063 Iustin Pop
    if not utils.IsValidIP(secondary_ip):
1672 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("Invalid secondary IP given")
1673 a8083063 Iustin Pop
    self.op.secondary_ip = secondary_ip
1674 e7c6e02b Michael Hanselmann
1675 a8083063 Iustin Pop
    node_list = cfg.GetNodeList()
1676 e7c6e02b Michael Hanselmann
    if not self.op.readd and node in node_list:
1677 e7c6e02b Michael Hanselmann
      raise errors.OpPrereqError("Node %s is already in the configuration" %
1678 e7c6e02b Michael Hanselmann
                                 node)
1679 e7c6e02b Michael Hanselmann
    elif self.op.readd and node not in node_list:
1680 e7c6e02b Michael Hanselmann
      raise errors.OpPrereqError("Node %s is not in the configuration" % node)
1681 a8083063 Iustin Pop
1682 a8083063 Iustin Pop
    for existing_node_name in node_list:
1683 a8083063 Iustin Pop
      existing_node = cfg.GetNodeInfo(existing_node_name)
1684 e7c6e02b Michael Hanselmann
1685 e7c6e02b Michael Hanselmann
      if self.op.readd and node == existing_node_name:
1686 e7c6e02b Michael Hanselmann
        if (existing_node.primary_ip != primary_ip or
1687 e7c6e02b Michael Hanselmann
            existing_node.secondary_ip != secondary_ip):
1688 e7c6e02b Michael Hanselmann
          raise errors.OpPrereqError("Readded node doesn't have the same IP"
1689 e7c6e02b Michael Hanselmann
                                     " address configuration as before")
1690 e7c6e02b Michael Hanselmann
        continue
1691 e7c6e02b Michael Hanselmann
1692 a8083063 Iustin Pop
      if (existing_node.primary_ip == primary_ip or
1693 a8083063 Iustin Pop
          existing_node.secondary_ip == primary_ip or
1694 a8083063 Iustin Pop
          existing_node.primary_ip == secondary_ip or
1695 a8083063 Iustin Pop
          existing_node.secondary_ip == secondary_ip):
1696 3ecf6786 Iustin Pop
        raise errors.OpPrereqError("New node ip address(es) conflict with"
1697 3ecf6786 Iustin Pop
                                   " existing node %s" % existing_node.name)
1698 a8083063 Iustin Pop
1699 a8083063 Iustin Pop
    # check that the type of the node (single versus dual homed) is the
1700 a8083063 Iustin Pop
    # same as for the master
1701 d6a02168 Michael Hanselmann
    myself = cfg.GetNodeInfo(self.cfg.GetMasterNode())
1702 a8083063 Iustin Pop
    master_singlehomed = myself.secondary_ip == myself.primary_ip
1703 a8083063 Iustin Pop
    newbie_singlehomed = secondary_ip == primary_ip
1704 a8083063 Iustin Pop
    if master_singlehomed != newbie_singlehomed:
1705 a8083063 Iustin Pop
      if master_singlehomed:
1706 3ecf6786 Iustin Pop
        raise errors.OpPrereqError("The master has no private ip but the"
1707 3ecf6786 Iustin Pop
                                   " new node has one")
1708 a8083063 Iustin Pop
      else:
1709 3ecf6786 Iustin Pop
        raise errors.OpPrereqError("The master has a private ip but the"
1710 3ecf6786 Iustin Pop
                                   " new node doesn't have one")
1711 a8083063 Iustin Pop
1712 a8083063 Iustin Pop
    # checks reachability
1713 b15d625f Iustin Pop
    if not utils.TcpPing(primary_ip, constants.DEFAULT_NODED_PORT):
1714 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("Node not reachable by ping")
1715 a8083063 Iustin Pop
1716 a8083063 Iustin Pop
    if not newbie_singlehomed:
1717 a8083063 Iustin Pop
      # check reachability from my secondary ip to newbie's secondary ip
1718 b15d625f Iustin Pop
      if not utils.TcpPing(secondary_ip, constants.DEFAULT_NODED_PORT,
1719 b15d625f Iustin Pop
                           source=myself.secondary_ip):
1720 f4bc1f2c Michael Hanselmann
        raise errors.OpPrereqError("Node secondary ip not reachable by TCP"
1721 f4bc1f2c Michael Hanselmann
                                   " based ping to noded port")
1722 a8083063 Iustin Pop
1723 a8083063 Iustin Pop
    self.new_node = objects.Node(name=node,
1724 a8083063 Iustin Pop
                                 primary_ip=primary_ip,
1725 a8083063 Iustin Pop
                                 secondary_ip=secondary_ip)
1726 a8083063 Iustin Pop
1727 a8083063 Iustin Pop
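  # Reachability sketch (hypothetical addresses): utils.TcpPing, as used in
  # CheckPrereq above, probes the noded port and can optionally be told which
  # source address to ping from:
  #
  #   utils.TcpPing("198.51.100.7", constants.DEFAULT_NODED_PORT)
  #   utils.TcpPing("198.51.100.7", constants.DEFAULT_NODED_PORT,
  #                 source="198.51.100.1")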
  def Exec(self, feedback_fn):
1728 a8083063 Iustin Pop
    """Adds the new node to the cluster.
1729 a8083063 Iustin Pop

1730 a8083063 Iustin Pop
    """
1731 a8083063 Iustin Pop
    new_node = self.new_node
1732 a8083063 Iustin Pop
    node = new_node.name
1733 a8083063 Iustin Pop
1734 a8083063 Iustin Pop
    # check connectivity
1735 a8083063 Iustin Pop
    result = rpc.call_version([node])[node]
1736 a8083063 Iustin Pop
    if result:
1737 a8083063 Iustin Pop
      if constants.PROTOCOL_VERSION == result:
1738 a8083063 Iustin Pop
        logger.Info("communication to node %s fine, sw version %s match" %
1739 a8083063 Iustin Pop
                    (node, result))
1740 a8083063 Iustin Pop
      else:
1741 3ecf6786 Iustin Pop
        raise errors.OpExecError("Version mismatch master version %s,"
1742 3ecf6786 Iustin Pop
                                 " node version %s" %
1743 3ecf6786 Iustin Pop
                                 (constants.PROTOCOL_VERSION, result))
1744 a8083063 Iustin Pop
    else:
1745 3ecf6786 Iustin Pop
      raise errors.OpExecError("Cannot get version from the new node")
1746 a8083063 Iustin Pop
1747 a8083063 Iustin Pop
    # setup ssh on node
1748 a8083063 Iustin Pop
    logger.Info("copy ssh key to node %s" % node)
1749 70d9e3d8 Iustin Pop
    priv_key, pub_key, _ = ssh.GetUserFiles(constants.GANETI_RUNAS)
1750 a8083063 Iustin Pop
    keyarray = []
1751 70d9e3d8 Iustin Pop
    keyfiles = [constants.SSH_HOST_DSA_PRIV, constants.SSH_HOST_DSA_PUB,
1752 70d9e3d8 Iustin Pop
                constants.SSH_HOST_RSA_PRIV, constants.SSH_HOST_RSA_PUB,
1753 70d9e3d8 Iustin Pop
                priv_key, pub_key]
1754 a8083063 Iustin Pop
1755 a8083063 Iustin Pop
    for i in keyfiles:
1756 a8083063 Iustin Pop
      f = open(i, 'r')
1757 a8083063 Iustin Pop
      try:
1758 a8083063 Iustin Pop
        keyarray.append(f.read())
1759 a8083063 Iustin Pop
      finally:
1760 a8083063 Iustin Pop
        f.close()
1761 a8083063 Iustin Pop
1762 a8083063 Iustin Pop
    result = rpc.call_node_add(node, keyarray[0], keyarray[1], keyarray[2],
1763 a8083063 Iustin Pop
                               keyarray[3], keyarray[4], keyarray[5])
1764 a8083063 Iustin Pop
1765 a8083063 Iustin Pop
    if not result:
1766 3ecf6786 Iustin Pop
      raise errors.OpExecError("Cannot transfer ssh keys to the new node")
1767 a8083063 Iustin Pop
1768 a8083063 Iustin Pop
    # Add node to our /etc/hosts, and add key to known_hosts
1769 d9c02ca6 Michael Hanselmann
    utils.AddHostToEtcHosts(new_node.name)
1770 c8a0948f Michael Hanselmann
1771 a8083063 Iustin Pop
    if new_node.secondary_ip != new_node.primary_ip:
1772 16abfbc2 Alexander Schreiber
      if not rpc.call_node_tcp_ping(new_node.name,
1773 16abfbc2 Alexander Schreiber
                                    constants.LOCALHOST_IP_ADDRESS,
1774 16abfbc2 Alexander Schreiber
                                    new_node.secondary_ip,
1775 16abfbc2 Alexander Schreiber
                                    constants.DEFAULT_NODED_PORT,
1776 16abfbc2 Alexander Schreiber
                                    10, False):
1777 f4bc1f2c Michael Hanselmann
        raise errors.OpExecError("Node claims it doesn't have the secondary ip"
1778 f4bc1f2c Michael Hanselmann
                                 " you gave (%s). Please fix and re-run this"
1779 f4bc1f2c Michael Hanselmann
                                 " command." % new_node.secondary_ip)
1780 a8083063 Iustin Pop
1781 d6a02168 Michael Hanselmann
    node_verify_list = [self.cfg.GetMasterNode()]
1782 5c0527ed Guido Trotter
    node_verify_param = {
1783 5c0527ed Guido Trotter
      'nodelist': [node],
1784 5c0527ed Guido Trotter
      # TODO: do a node-net-test as well?
1785 5c0527ed Guido Trotter
    }
1786 5c0527ed Guido Trotter
1787 62c9ec92 Iustin Pop
    result = rpc.call_node_verify(node_verify_list, node_verify_param,
1788 62c9ec92 Iustin Pop
                                  self.cfg.GetClusterName())
1789 5c0527ed Guido Trotter
    for verifier in node_verify_list:
1790 5c0527ed Guido Trotter
      if not result[verifier]:
1791 5c0527ed Guido Trotter
        raise errors.OpExecError("Cannot communicate with %s's node daemon"
1792 5c0527ed Guido Trotter
                                 " for remote verification" % verifier)
1793 5c0527ed Guido Trotter
      if result[verifier]['nodelist']:
1794 5c0527ed Guido Trotter
        for failed in result[verifier]['nodelist']:
1795 5c0527ed Guido Trotter
          feedback_fn("ssh/hostname verification failed %s -> %s" %
1796 5c0527ed Guido Trotter
                      (verifier, result[verifier]['nodelist'][failed]))
1797 5c0527ed Guido Trotter
        raise errors.OpExecError("ssh/hostname verification failed.")
1798 ff98055b Iustin Pop
1799 a8083063 Iustin Pop
    # Distribute updated /etc/hosts and known_hosts to all nodes,
1800 a8083063 Iustin Pop
    # including the node just added
1801 d6a02168 Michael Hanselmann
    myself = self.cfg.GetNodeInfo(self.cfg.GetMasterNode())
1802 102b115b Michael Hanselmann
    dist_nodes = self.cfg.GetNodeList()
1803 102b115b Michael Hanselmann
    if not self.op.readd:
1804 102b115b Michael Hanselmann
      dist_nodes.append(node)
1805 a8083063 Iustin Pop
    if myself.name in dist_nodes:
1806 a8083063 Iustin Pop
      dist_nodes.remove(myself.name)
1807 a8083063 Iustin Pop
1808 a8083063 Iustin Pop
    logger.Debug("Copying hosts and known_hosts to all nodes")
1809 107711b0 Michael Hanselmann
    for fname in (constants.ETC_HOSTS, constants.SSH_KNOWN_HOSTS_FILE):
1810 a8083063 Iustin Pop
      result = rpc.call_upload_file(dist_nodes, fname)
1811 a8083063 Iustin Pop
      for to_node in dist_nodes:
1812 a8083063 Iustin Pop
        if not result[to_node]:
1813 a8083063 Iustin Pop
          logger.Error("copy of file %s to node %s failed" %
1814 a8083063 Iustin Pop
                       (fname, to_node))
1815 a8083063 Iustin Pop
1816 d6a02168 Michael Hanselmann
    to_copy = []
1817 00cd937c Iustin Pop
    if constants.HT_XEN_HVM in self.cfg.GetClusterInfo().enabled_hypervisors:
1818 2a6469d5 Alexander Schreiber
      to_copy.append(constants.VNC_PASSWORD_FILE)
1819 a8083063 Iustin Pop
    for fname in to_copy:
1820 b5602d15 Guido Trotter
      result = rpc.call_upload_file([node], fname)
1821 b5602d15 Guido Trotter
      if not result[node]:
1822 a8083063 Iustin Pop
        logger.Error("could not copy file %s to node %s" % (fname, node))
1823 a8083063 Iustin Pop
1824 d8470559 Michael Hanselmann
    if self.op.readd:
1825 d8470559 Michael Hanselmann
      self.context.ReaddNode(new_node)
1826 d8470559 Michael Hanselmann
    else:
1827 d8470559 Michael Hanselmann
      self.context.AddNode(new_node)
1828 a8083063 Iustin Pop
1829 a8083063 Iustin Pop
1830 a8083063 Iustin Pop
class LUQueryClusterInfo(NoHooksLU):
1831 a8083063 Iustin Pop
  """Query cluster configuration.
1832 a8083063 Iustin Pop

1833 a8083063 Iustin Pop
  """
1834 a8083063 Iustin Pop
  _OP_REQP = []
1835 59322403 Iustin Pop
  REQ_MASTER = False
1836 642339cf Guido Trotter
  REQ_BGL = False
1837 642339cf Guido Trotter
1838 642339cf Guido Trotter
  def ExpandNames(self):
1839 642339cf Guido Trotter
    self.needed_locks = {}
1840 a8083063 Iustin Pop
1841 a8083063 Iustin Pop
  def CheckPrereq(self):
1842 a8083063 Iustin Pop
    """No prerequsites needed for this LU.
1843 a8083063 Iustin Pop

1844 a8083063 Iustin Pop
    """
1845 a8083063 Iustin Pop
    pass
1846 a8083063 Iustin Pop
1847 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
1848 a8083063 Iustin Pop
    """Return cluster config.
1849 a8083063 Iustin Pop

1850 a8083063 Iustin Pop
    """
1851 a8083063 Iustin Pop
    result = {
1852 d6a02168 Michael Hanselmann
      "name": self.cfg.GetClusterName(),
1853 a8083063 Iustin Pop
      "software_version": constants.RELEASE_VERSION,
1854 a8083063 Iustin Pop
      "protocol_version": constants.PROTOCOL_VERSION,
1855 a8083063 Iustin Pop
      "config_version": constants.CONFIG_VERSION,
1856 a8083063 Iustin Pop
      "os_api_version": constants.OS_API_VERSION,
1857 a8083063 Iustin Pop
      "export_version": constants.EXPORT_VERSION,
1858 d6a02168 Michael Hanselmann
      "master": self.cfg.GetMasterNode(),
1859 a8083063 Iustin Pop
      "architecture": (platform.architecture()[0], platform.machine()),
1860 d6a02168 Michael Hanselmann
      "hypervisor_type": self.cfg.GetHypervisorType(),
1861 e69d05fd Iustin Pop
      "enabled_hypervisors": self.cfg.GetClusterInfo().enabled_hypervisors,
1862 a8083063 Iustin Pop
      }
1863 a8083063 Iustin Pop
1864 a8083063 Iustin Pop
    return result
1865 a8083063 Iustin Pop
1866 a8083063 Iustin Pop
1867 ae5849b5 Michael Hanselmann
class LUQueryConfigValues(NoHooksLU):
1868 ae5849b5 Michael Hanselmann
  """Return configuration values.
1869 a8083063 Iustin Pop

1870 a8083063 Iustin Pop
  """
1871 a8083063 Iustin Pop
  _OP_REQP = []
1872 642339cf Guido Trotter
  REQ_BGL = False
1873 642339cf Guido Trotter
1874 642339cf Guido Trotter
  def ExpandNames(self):
1875 642339cf Guido Trotter
    self.needed_locks = {}
1876 a8083063 Iustin Pop
1877 ae5849b5 Michael Hanselmann
    static_fields = ["cluster_name", "master_node"]
1878 ae5849b5 Michael Hanselmann
    _CheckOutputFields(static=static_fields,
1879 ae5849b5 Michael Hanselmann
                       dynamic=[],
1880 ae5849b5 Michael Hanselmann
                       selected=self.op.output_fields)
1881 ae5849b5 Michael Hanselmann
1882 a8083063 Iustin Pop
  def CheckPrereq(self):
1883 a8083063 Iustin Pop
    """No prerequisites.
1884 a8083063 Iustin Pop

1885 a8083063 Iustin Pop
    """
1886 a8083063 Iustin Pop
    pass
1887 a8083063 Iustin Pop
1888 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
1889 a8083063 Iustin Pop
    """Dump a representation of the cluster config to the standard output.
1890 a8083063 Iustin Pop

1891 a8083063 Iustin Pop
    """
1892 ae5849b5 Michael Hanselmann
    values = []
1893 ae5849b5 Michael Hanselmann
    for field in self.op.output_fields:
1894 ae5849b5 Michael Hanselmann
      if field == "cluster_name":
1895 ae5849b5 Michael Hanselmann
        values.append(self.cfg.GetClusterName())
1896 ae5849b5 Michael Hanselmann
      elif field == "master_node":
1897 ae5849b5 Michael Hanselmann
        values.append(self.cfg.GetMasterNode())
1898 ae5849b5 Michael Hanselmann
      else:
1899 ae5849b5 Michael Hanselmann
        raise errors.ParameterError(field)
1900 ae5849b5 Michael Hanselmann
    return values
1901 a8083063 Iustin Pop
1902 a8083063 Iustin Pop
1903 a8083063 Iustin Pop
class LUActivateInstanceDisks(NoHooksLU):
1904 a8083063 Iustin Pop
  """Bring up an instance's disks.
1905 a8083063 Iustin Pop

1906 a8083063 Iustin Pop
  """
1907 a8083063 Iustin Pop
  _OP_REQP = ["instance_name"]
1908 f22a8ba3 Guido Trotter
  REQ_BGL = False
1909 f22a8ba3 Guido Trotter
1910 f22a8ba3 Guido Trotter
  def ExpandNames(self):
1911 f22a8ba3 Guido Trotter
    self._ExpandAndLockInstance()
1912 f22a8ba3 Guido Trotter
    self.needed_locks[locking.LEVEL_NODE] = []
1913 f22a8ba3 Guido Trotter
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
1914 f22a8ba3 Guido Trotter
1915 f22a8ba3 Guido Trotter
  def DeclareLocks(self, level):
1916 f22a8ba3 Guido Trotter
    if level == locking.LEVEL_NODE:
1917 f22a8ba3 Guido Trotter
      self._LockInstancesNodes()
1918 a8083063 Iustin Pop
1919 a8083063 Iustin Pop
  def CheckPrereq(self):
1920 a8083063 Iustin Pop
    """Check prerequisites.
1921 a8083063 Iustin Pop

1922 a8083063 Iustin Pop
    This checks that the instance is in the cluster.
1923 a8083063 Iustin Pop

1924 a8083063 Iustin Pop
    """
1925 f22a8ba3 Guido Trotter
    self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
1926 f22a8ba3 Guido Trotter
    assert self.instance is not None, \
1927 f22a8ba3 Guido Trotter
      "Cannot retrieve locked instance %s" % self.op.instance_name
1928 a8083063 Iustin Pop
1929 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
1930 a8083063 Iustin Pop
    """Activate the disks.
1931 a8083063 Iustin Pop

1932 a8083063 Iustin Pop
    """
1933 a8083063 Iustin Pop
    disks_ok, disks_info = _AssembleInstanceDisks(self.instance, self.cfg)
1934 a8083063 Iustin Pop
    if not disks_ok:
1935 3ecf6786 Iustin Pop
      raise errors.OpExecError("Cannot activate block devices")
1936 a8083063 Iustin Pop
1937 a8083063 Iustin Pop
    return disks_info
1938 a8083063 Iustin Pop
1939 a8083063 Iustin Pop
1940 a8083063 Iustin Pop
def _AssembleInstanceDisks(instance, cfg, ignore_secondaries=False):
1941 a8083063 Iustin Pop
  """Prepare the block devices for an instance.
1942 a8083063 Iustin Pop

1943 a8083063 Iustin Pop
  This sets up the block devices on all nodes.
1944 a8083063 Iustin Pop

1945 a8083063 Iustin Pop
  Args:
1946 a8083063 Iustin Pop
    instance: a ganeti.objects.Instance object
    cfg: the cluster's ConfigWriter instance
1947 a8083063 Iustin Pop
    ignore_secondaries: if true, errors on secondary nodes won't result
1948 a8083063 Iustin Pop
                        in an error return from the function
1949 a8083063 Iustin Pop

1950 a8083063 Iustin Pop
  Returns:
1951 a8083063 Iustin Pop
    a tuple (disks_ok, device_info): disks_ok is false if the operation failed;
1952 a8083063 Iustin Pop
    device_info is a list of (host, instance_visible_name, node_visible_name)
1953 a8083063 Iustin Pop
    tuples giving the mapping from node devices to instance devices
1954 a8083063 Iustin Pop
  """
1955 a8083063 Iustin Pop
  device_info = []
1956 a8083063 Iustin Pop
  disks_ok = True
1957 fdbd668d Iustin Pop
  iname = instance.name
1958 fdbd668d Iustin Pop
  # With the two passes mechanism we try to reduce the window of
1959 fdbd668d Iustin Pop
  # opportunity for the race condition of switching DRBD to primary
1960 fdbd668d Iustin Pop
  # before handshaking occurred, but we do not eliminate it
1961 fdbd668d Iustin Pop
1962 fdbd668d Iustin Pop
  # The proper fix would be to wait (with some limits) until the
1963 fdbd668d Iustin Pop
  # connection has been made and drbd transitions from WFConnection
1964 fdbd668d Iustin Pop
  # into any other network-connected state (Connected, SyncTarget,
1965 fdbd668d Iustin Pop
  # SyncSource, etc.)
1966 fdbd668d Iustin Pop
1967 fdbd668d Iustin Pop
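  # Rough sketch of the ordering used below (added for clarity):
  #   pass 1: rpc.call_blockdev_assemble(node, node_disk, iname, False) on all nodes
  #   pass 2: rpc.call_blockdev_assemble(node, node_disk, iname, True) on the primary only
  # so both halves of a DRBD pair exist before either side is made primary.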
  # 1st pass, assemble on all nodes in secondary mode
1968 a8083063 Iustin Pop
  for inst_disk in instance.disks:
1969 a8083063 Iustin Pop
    for node, node_disk in inst_disk.ComputeNodeTree(instance.primary_node):
1970 a8083063 Iustin Pop
      cfg.SetDiskID(node_disk, node)
1971 fdbd668d Iustin Pop
      result = rpc.call_blockdev_assemble(node, node_disk, iname, False)
1972 a8083063 Iustin Pop
      if not result:
1973 f4bc1f2c Michael Hanselmann
        logger.Error("could not prepare block device %s on node %s"
1974 fdbd668d Iustin Pop
                     " (is_primary=False, pass=1)" % (inst_disk.iv_name, node))
1975 fdbd668d Iustin Pop
        if not ignore_secondaries:
1976 a8083063 Iustin Pop
          disks_ok = False
1977 fdbd668d Iustin Pop
1978 fdbd668d Iustin Pop
  # FIXME: race condition on drbd migration to primary
1979 fdbd668d Iustin Pop
1980 fdbd668d Iustin Pop
  # 2nd pass, do only the primary node
1981 fdbd668d Iustin Pop
  for inst_disk in instance.disks:
1982 fdbd668d Iustin Pop
    for node, node_disk in inst_disk.ComputeNodeTree(instance.primary_node):
1983 fdbd668d Iustin Pop
      if node != instance.primary_node:
1984 fdbd668d Iustin Pop
        continue
1985 fdbd668d Iustin Pop
      cfg.SetDiskID(node_disk, node)
1986 fdbd668d Iustin Pop
      result = rpc.call_blockdev_assemble(node, node_disk, iname, True)
1987 fdbd668d Iustin Pop
      if not result:
1988 fdbd668d Iustin Pop
        logger.Error("could not prepare block device %s on node %s"
1989 fdbd668d Iustin Pop
                     " (is_primary=True, pass=2)" % (inst_disk.iv_name, node))
1990 fdbd668d Iustin Pop
        disks_ok = False
1991 fdbd668d Iustin Pop
    device_info.append((instance.primary_node, inst_disk.iv_name, result))
1992 a8083063 Iustin Pop
1993 b352ab5b Iustin Pop
  # leave the disks configured for the primary node
1994 b352ab5b Iustin Pop
  # this is a workaround that would be fixed better by
1995 b352ab5b Iustin Pop
  # improving the logical/physical id handling
1996 b352ab5b Iustin Pop
  for disk in instance.disks:
1997 b352ab5b Iustin Pop
    cfg.SetDiskID(disk, instance.primary_node)
1998 b352ab5b Iustin Pop
1999 a8083063 Iustin Pop
  return disks_ok, device_info
2000 a8083063 Iustin Pop
2001 a8083063 Iustin Pop
2002 fe7b0351 Michael Hanselmann
def _StartInstanceDisks(cfg, instance, force):
2003 3ecf6786 Iustin Pop
  """Start the disks of an instance.
2004 3ecf6786 Iustin Pop

2005 3ecf6786 Iustin Pop
  """
2006 fe7b0351 Michael Hanselmann
  disks_ok, dummy = _AssembleInstanceDisks(instance, cfg,
2007 fe7b0351 Michael Hanselmann
                                           ignore_secondaries=force)
2008 fe7b0351 Michael Hanselmann
  if not disks_ok:
2009 fe7b0351 Michael Hanselmann
    _ShutdownInstanceDisks(instance, cfg)
2010 fe7b0351 Michael Hanselmann
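    # Note (added): callers such as LUReinstallInstance pass force=None because
    # they have no --force option to suggest, so the hint below is only printed
    # when the caller passed an explicit force=False.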
    if force is not None and not force:
2011 fe7b0351 Michael Hanselmann
      logger.Error("If the message above refers to a secondary node,"
2012 fe7b0351 Michael Hanselmann
                   " you can retry the operation using '--force'.")
2013 3ecf6786 Iustin Pop
    raise errors.OpExecError("Disk consistency error")
2014 fe7b0351 Michael Hanselmann
2015 fe7b0351 Michael Hanselmann
2016 a8083063 Iustin Pop
class LUDeactivateInstanceDisks(NoHooksLU):
2017 a8083063 Iustin Pop
  """Shutdown an instance's disks.
2018 a8083063 Iustin Pop

2019 a8083063 Iustin Pop
  """
2020 a8083063 Iustin Pop
  _OP_REQP = ["instance_name"]
2021 f22a8ba3 Guido Trotter
  REQ_BGL = False
2022 f22a8ba3 Guido Trotter
2023 f22a8ba3 Guido Trotter
  def ExpandNames(self):
2024 f22a8ba3 Guido Trotter
    self._ExpandAndLockInstance()
2025 f22a8ba3 Guido Trotter
    self.needed_locks[locking.LEVEL_NODE] = []
2026 f22a8ba3 Guido Trotter
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
2027 f22a8ba3 Guido Trotter
2028 f22a8ba3 Guido Trotter
  def DeclareLocks(self, level):
2029 f22a8ba3 Guido Trotter
    if level == locking.LEVEL_NODE:
2030 f22a8ba3 Guido Trotter
      self._LockInstancesNodes()
2031 a8083063 Iustin Pop
2032 a8083063 Iustin Pop
  def CheckPrereq(self):
2033 a8083063 Iustin Pop
    """Check prerequisites.
2034 a8083063 Iustin Pop

2035 a8083063 Iustin Pop
    This checks that the instance is in the cluster.
2036 a8083063 Iustin Pop

2037 a8083063 Iustin Pop
    """
2038 f22a8ba3 Guido Trotter
    self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
2039 f22a8ba3 Guido Trotter
    assert self.instance is not None, \
2040 f22a8ba3 Guido Trotter
      "Cannot retrieve locked instance %s" % self.op.instance_name
2041 a8083063 Iustin Pop
2042 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
2043 a8083063 Iustin Pop
    """Deactivate the disks
2044 a8083063 Iustin Pop

2045 a8083063 Iustin Pop
    """
2046 a8083063 Iustin Pop
    instance = self.instance
2047 155d6c75 Guido Trotter
    _SafeShutdownInstanceDisks(instance, self.cfg)
2048 a8083063 Iustin Pop
2049 a8083063 Iustin Pop
2050 155d6c75 Guido Trotter
def _SafeShutdownInstanceDisks(instance, cfg):
2051 155d6c75 Guido Trotter
  """Shutdown block devices of an instance.
2052 155d6c75 Guido Trotter

2053 155d6c75 Guido Trotter
  This function checks if an instance is running, before calling
2054 155d6c75 Guido Trotter
  _ShutdownInstanceDisks.
2055 155d6c75 Guido Trotter

2056 155d6c75 Guido Trotter
  """
2057 e69d05fd Iustin Pop
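  # Rationale (added comment): tearing the block devices down under a live
  # instance would effectively unplug its disks, so we first ask the
  # hypervisor on the primary node whether the instance is still running.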
  ins_l = rpc.call_instance_list([instance.primary_node],
2058 e69d05fd Iustin Pop
                                 [instance.hypervisor])
2059 155d6c75 Guido Trotter
  ins_l = ins_l[instance.primary_node]
2060 155d6c75 Guido Trotter
  if not isinstance(ins_l, list):
2061 155d6c75 Guido Trotter
    raise errors.OpExecError("Can't contact node '%s'" %
2062 155d6c75 Guido Trotter
                             instance.primary_node)
2063 155d6c75 Guido Trotter
2064 155d6c75 Guido Trotter
  if instance.name in ins_l:
2065 155d6c75 Guido Trotter
    raise errors.OpExecError("Instance is running, can't shutdown"
2066 155d6c75 Guido Trotter
                             " block devices.")
2067 155d6c75 Guido Trotter
2068 155d6c75 Guido Trotter
  _ShutdownInstanceDisks(instance, cfg)
2069 a8083063 Iustin Pop
2070 a8083063 Iustin Pop
2071 a8083063 Iustin Pop
def _ShutdownInstanceDisks(instance, cfg, ignore_primary=False):
2072 a8083063 Iustin Pop
  """Shutdown block devices of an instance.
2073 a8083063 Iustin Pop

2074 a8083063 Iustin Pop
  This does the shutdown on all nodes of the instance.
2075 a8083063 Iustin Pop

2076 a8083063 Iustin Pop
  If ignore_primary is true, errors on the primary node are
2077 a8083063 Iustin Pop
  ignored.
2078 a8083063 Iustin Pop

2079 a8083063 Iustin Pop
  """
2080 a8083063 Iustin Pop
  result = True
2081 a8083063 Iustin Pop
  for disk in instance.disks:
2082 a8083063 Iustin Pop
    for node, top_disk in disk.ComputeNodeTree(instance.primary_node):
2083 a8083063 Iustin Pop
      cfg.SetDiskID(top_disk, node)
2084 a8083063 Iustin Pop
      if not rpc.call_blockdev_shutdown(node, top_disk):
2085 a8083063 Iustin Pop
        logger.Error("could not shutdown block device %s on node %s" %
2086 a8083063 Iustin Pop
                     (disk.iv_name, node))
2087 a8083063 Iustin Pop
        if not ignore_primary or node != instance.primary_node:
2088 a8083063 Iustin Pop
          result = False
2089 a8083063 Iustin Pop
  return result
2090 a8083063 Iustin Pop
2091 a8083063 Iustin Pop
2092 e69d05fd Iustin Pop
def _CheckNodeFreeMemory(cfg, node, reason, requested, hypervisor):
2093 d4f16fd9 Iustin Pop
  """Checks if a node has enough free memory.
2094 d4f16fd9 Iustin Pop

2095 d4f16fd9 Iustin Pop
  This function checks if a given node has the needed amount of free
2096 d4f16fd9 Iustin Pop
  memory. In case the node has less memory or we cannot get the
2097 d4f16fd9 Iustin Pop
  information from the node, this function raises an OpPrereqError
2098 d4f16fd9 Iustin Pop
  exception.
2099 d4f16fd9 Iustin Pop

2100 e69d05fd Iustin Pop
  @type cfg: C{config.ConfigWriter}
2101 e69d05fd Iustin Pop
  @param cfg: the ConfigWriter instance from which we get configuration data
2102 e69d05fd Iustin Pop
  @type node: C{str}
2103 e69d05fd Iustin Pop
  @param node: the node to check
2104 e69d05fd Iustin Pop
  @type reason: C{str}
2105 e69d05fd Iustin Pop
  @param reason: string to use in the error message
2106 e69d05fd Iustin Pop
  @type requested: C{int}
2107 e69d05fd Iustin Pop
  @param requested: the amount of memory in MiB to check for
2108 e69d05fd Iustin Pop
  @type hypervisor: C{str}
2109 e69d05fd Iustin Pop
  @param hypervisor: the hypervisor to ask for memory stats
2110 e69d05fd Iustin Pop
  @raise errors.OpPrereqError: if the node doesn't have enough memory, or
2111 e69d05fd Iustin Pop
      we cannot check the node
2112 d4f16fd9 Iustin Pop

2113 d4f16fd9 Iustin Pop
  """
2114 e69d05fd Iustin Pop
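  # Illustrative call (copied from LUStartupInstance.CheckPrereq further down):
  #   _CheckNodeFreeMemory(self.cfg, instance.primary_node,
  #                        "starting instance %s" % instance.name,
  #                        instance.memory, instance.hypervisor)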
  nodeinfo = rpc.call_node_info([node], cfg.GetVGName(), hypervisor)
2115 d4f16fd9 Iustin Pop
  if not nodeinfo or not isinstance(nodeinfo, dict):
2116 d4f16fd9 Iustin Pop
    raise errors.OpPrereqError("Could not contact node %s for resource"
2117 d4f16fd9 Iustin Pop
                             " information" % (node,))
2118 d4f16fd9 Iustin Pop
2119 d4f16fd9 Iustin Pop
  free_mem = nodeinfo[node].get('memory_free')
2120 d4f16fd9 Iustin Pop
  if not isinstance(free_mem, int):
2121 d4f16fd9 Iustin Pop
    raise errors.OpPrereqError("Can't compute free memory on node %s, result"
2122 d4f16fd9 Iustin Pop
                             " was '%s'" % (node, free_mem))
2123 d4f16fd9 Iustin Pop
  if requested > free_mem:
2124 d4f16fd9 Iustin Pop
    raise errors.OpPrereqError("Not enough memory on node %s for %s:"
2125 d4f16fd9 Iustin Pop
                             " needed %s MiB, available %s MiB" %
2126 d4f16fd9 Iustin Pop
                             (node, reason, requested, free_mem))
2127 d4f16fd9 Iustin Pop
2128 d4f16fd9 Iustin Pop
2129 a8083063 Iustin Pop
class LUStartupInstance(LogicalUnit):
2130 a8083063 Iustin Pop
  """Starts an instance.
2131 a8083063 Iustin Pop

2132 a8083063 Iustin Pop
  """
2133 a8083063 Iustin Pop
  HPATH = "instance-start"
2134 a8083063 Iustin Pop
  HTYPE = constants.HTYPE_INSTANCE
2135 a8083063 Iustin Pop
  _OP_REQP = ["instance_name", "force"]
2136 e873317a Guido Trotter
  REQ_BGL = False
2137 e873317a Guido Trotter
2138 e873317a Guido Trotter
  def ExpandNames(self):
2139 e873317a Guido Trotter
    self._ExpandAndLockInstance()
2140 e873317a Guido Trotter
    self.needed_locks[locking.LEVEL_NODE] = []
2141 f6d9a522 Guido Trotter
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
2142 e873317a Guido Trotter
2143 e873317a Guido Trotter
  def DeclareLocks(self, level):
2144 e873317a Guido Trotter
    if level == locking.LEVEL_NODE:
2145 e873317a Guido Trotter
      self._LockInstancesNodes()
2146 a8083063 Iustin Pop
2147 a8083063 Iustin Pop
  def BuildHooksEnv(self):
2148 a8083063 Iustin Pop
    """Build hooks env.
2149 a8083063 Iustin Pop

2150 a8083063 Iustin Pop
    This runs on master, primary and secondary nodes of the instance.
2151 a8083063 Iustin Pop

2152 a8083063 Iustin Pop
    """
2153 a8083063 Iustin Pop
    env = {
2154 a8083063 Iustin Pop
      "FORCE": self.op.force,
2155 a8083063 Iustin Pop
      }
2156 396e1b78 Michael Hanselmann
    env.update(_BuildInstanceHookEnvByObject(self.instance))
2157 d6a02168 Michael Hanselmann
    nl = ([self.cfg.GetMasterNode(), self.instance.primary_node] +
2158 a8083063 Iustin Pop
          list(self.instance.secondary_nodes))
2159 a8083063 Iustin Pop
    return env, nl, nl
2160 a8083063 Iustin Pop
2161 a8083063 Iustin Pop
  def CheckPrereq(self):
2162 a8083063 Iustin Pop
    """Check prerequisites.
2163 a8083063 Iustin Pop

2164 a8083063 Iustin Pop
    This checks that the instance is in the cluster.
2165 a8083063 Iustin Pop

2166 a8083063 Iustin Pop
    """
2167 e873317a Guido Trotter
    self.instance = instance = self.cfg.GetInstanceInfo(self.op.instance_name)
2168 e873317a Guido Trotter
    assert self.instance is not None, \
2169 e873317a Guido Trotter
      "Cannot retrieve locked instance %s" % self.op.instance_name
2170 a8083063 Iustin Pop
2171 a8083063 Iustin Pop
    # check bridges existence
2172 bf6929a2 Alexander Schreiber
    _CheckInstanceBridgesExist(instance)
2173 a8083063 Iustin Pop
2174 d4f16fd9 Iustin Pop
    _CheckNodeFreeMemory(self.cfg, instance.primary_node,
2175 d4f16fd9 Iustin Pop
                         "starting instance %s" % instance.name,
2176 e69d05fd Iustin Pop
                         instance.memory, instance.hypervisor)
2177 d4f16fd9 Iustin Pop
2178 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
2179 a8083063 Iustin Pop
    """Start the instance.
2180 a8083063 Iustin Pop

2181 a8083063 Iustin Pop
    """
2182 a8083063 Iustin Pop
    instance = self.instance
2183 a8083063 Iustin Pop
    force = self.op.force
2184 a8083063 Iustin Pop
    extra_args = getattr(self.op, "extra_args", "")
2185 a8083063 Iustin Pop
2186 fe482621 Iustin Pop
    self.cfg.MarkInstanceUp(instance.name)
2187 fe482621 Iustin Pop
2188 a8083063 Iustin Pop
    node_current = instance.primary_node
2189 a8083063 Iustin Pop
2190 fe7b0351 Michael Hanselmann
    _StartInstanceDisks(self.cfg, instance, force)
2191 a8083063 Iustin Pop
2192 a8083063 Iustin Pop
    if not rpc.call_instance_start(node_current, instance, extra_args):
2193 a8083063 Iustin Pop
      _ShutdownInstanceDisks(instance, self.cfg)
2194 3ecf6786 Iustin Pop
      raise errors.OpExecError("Could not start instance")
2195 a8083063 Iustin Pop
2196 a8083063 Iustin Pop
2197 bf6929a2 Alexander Schreiber
class LURebootInstance(LogicalUnit):
2198 bf6929a2 Alexander Schreiber
  """Reboot an instance.
2199 bf6929a2 Alexander Schreiber

2200 bf6929a2 Alexander Schreiber
  """
2201 bf6929a2 Alexander Schreiber
  HPATH = "instance-reboot"
2202 bf6929a2 Alexander Schreiber
  HTYPE = constants.HTYPE_INSTANCE
2203 bf6929a2 Alexander Schreiber
  _OP_REQP = ["instance_name", "ignore_secondaries", "reboot_type"]
2204 e873317a Guido Trotter
  REQ_BGL = False
2205 e873317a Guido Trotter
2206 e873317a Guido Trotter
  def ExpandNames(self):
2207 0fcc5db3 Guido Trotter
    if self.op.reboot_type not in [constants.INSTANCE_REBOOT_SOFT,
2208 0fcc5db3 Guido Trotter
                                   constants.INSTANCE_REBOOT_HARD,
2209 0fcc5db3 Guido Trotter
                                   constants.INSTANCE_REBOOT_FULL]:
2210 0fcc5db3 Guido Trotter
      raise errors.ParameterError("reboot type not in [%s, %s, %s]" %
2211 0fcc5db3 Guido Trotter
                                  (constants.INSTANCE_REBOOT_SOFT,
2212 0fcc5db3 Guido Trotter
                                   constants.INSTANCE_REBOOT_HARD,
2213 0fcc5db3 Guido Trotter
                                   constants.INSTANCE_REBOOT_FULL))
2214 e873317a Guido Trotter
    self._ExpandAndLockInstance()
2215 e873317a Guido Trotter
    self.needed_locks[locking.LEVEL_NODE] = []
2216 f6d9a522 Guido Trotter
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
2217 e873317a Guido Trotter
2218 e873317a Guido Trotter
  def DeclareLocks(self, level):
2219 e873317a Guido Trotter
    if level == locking.LEVEL_NODE:
2220 849da276 Guido Trotter
      # only a full reboot needs the secondary nodes locked as well
      primary_only = self.op.reboot_type != constants.INSTANCE_REBOOT_FULL
2221 849da276 Guido Trotter
      self._LockInstancesNodes(primary_only=primary_only)
2222 bf6929a2 Alexander Schreiber
2223 bf6929a2 Alexander Schreiber
  def BuildHooksEnv(self):
2224 bf6929a2 Alexander Schreiber
    """Build hooks env.
2225 bf6929a2 Alexander Schreiber

2226 bf6929a2 Alexander Schreiber
    This runs on master, primary and secondary nodes of the instance.
2227 bf6929a2 Alexander Schreiber

2228 bf6929a2 Alexander Schreiber
    """
2229 bf6929a2 Alexander Schreiber
    env = {
2230 bf6929a2 Alexander Schreiber
      "IGNORE_SECONDARIES": self.op.ignore_secondaries,
2231 bf6929a2 Alexander Schreiber
      }
2232 bf6929a2 Alexander Schreiber
    env.update(_BuildInstanceHookEnvByObject(self.instance))
2233 d6a02168 Michael Hanselmann
    nl = ([self.cfg.GetMasterNode(), self.instance.primary_node] +
2234 bf6929a2 Alexander Schreiber
          list(self.instance.secondary_nodes))
2235 bf6929a2 Alexander Schreiber
    return env, nl, nl
2236 bf6929a2 Alexander Schreiber
2237 bf6929a2 Alexander Schreiber
  def CheckPrereq(self):
2238 bf6929a2 Alexander Schreiber
    """Check prerequisites.
2239 bf6929a2 Alexander Schreiber

2240 bf6929a2 Alexander Schreiber
    This checks that the instance is in the cluster.
2241 bf6929a2 Alexander Schreiber

2242 bf6929a2 Alexander Schreiber
    """
2243 e873317a Guido Trotter
    self.instance = instance = self.cfg.GetInstanceInfo(self.op.instance_name)
2244 e873317a Guido Trotter
    assert self.instance is not None, \
2245 e873317a Guido Trotter
      "Cannot retrieve locked instance %s" % self.op.instance_name
2246 bf6929a2 Alexander Schreiber
2247 bf6929a2 Alexander Schreiber
    # check bridges existence
2248 bf6929a2 Alexander Schreiber
    _CheckInstanceBridgesExist(instance)
2249 bf6929a2 Alexander Schreiber
2250 bf6929a2 Alexander Schreiber
  def Exec(self, feedback_fn):
2251 bf6929a2 Alexander Schreiber
    """Reboot the instance.
2252 bf6929a2 Alexander Schreiber

2253 bf6929a2 Alexander Schreiber
    """
2254 bf6929a2 Alexander Schreiber
    instance = self.instance
2255 bf6929a2 Alexander Schreiber
    ignore_secondaries = self.op.ignore_secondaries
2256 bf6929a2 Alexander Schreiber
    reboot_type = self.op.reboot_type
2257 bf6929a2 Alexander Schreiber
    extra_args = getattr(self.op, "extra_args", "")
2258 bf6929a2 Alexander Schreiber
2259 bf6929a2 Alexander Schreiber
    node_current = instance.primary_node
2260 bf6929a2 Alexander Schreiber
2261 bf6929a2 Alexander Schreiber
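    # Summary (added): SOFT and HARD reboots are delegated to the hypervisor
    # via rpc.call_instance_reboot; a FULL reboot is emulated below as
    # shutdown + disk deactivation/reactivation + start.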
    if reboot_type in [constants.INSTANCE_REBOOT_SOFT,
2262 bf6929a2 Alexander Schreiber
                       constants.INSTANCE_REBOOT_HARD]:
2263 bf6929a2 Alexander Schreiber
      if not rpc.call_instance_reboot(node_current, instance,
2264 bf6929a2 Alexander Schreiber
                                      reboot_type, extra_args):
2265 bf6929a2 Alexander Schreiber
        raise errors.OpExecError("Could not reboot instance")
2266 bf6929a2 Alexander Schreiber
    else:
2267 bf6929a2 Alexander Schreiber
      if not rpc.call_instance_shutdown(node_current, instance):
2268 bf6929a2 Alexander Schreiber
        raise errors.OpExecError("could not shutdown instance for full reboot")
2269 bf6929a2 Alexander Schreiber
      _ShutdownInstanceDisks(instance, self.cfg)
2270 bf6929a2 Alexander Schreiber
      _StartInstanceDisks(self.cfg, instance, ignore_secondaries)
2271 bf6929a2 Alexander Schreiber
      if not rpc.call_instance_start(node_current, instance, extra_args):
2272 bf6929a2 Alexander Schreiber
        _ShutdownInstanceDisks(instance, self.cfg)
2273 bf6929a2 Alexander Schreiber
        raise errors.OpExecError("Could not start instance for full reboot")
2274 bf6929a2 Alexander Schreiber
2275 bf6929a2 Alexander Schreiber
    self.cfg.MarkInstanceUp(instance.name)
2276 bf6929a2 Alexander Schreiber
2277 bf6929a2 Alexander Schreiber
2278 a8083063 Iustin Pop
class LUShutdownInstance(LogicalUnit):
2279 a8083063 Iustin Pop
  """Shutdown an instance.
2280 a8083063 Iustin Pop

2281 a8083063 Iustin Pop
  """
2282 a8083063 Iustin Pop
  HPATH = "instance-stop"
2283 a8083063 Iustin Pop
  HTYPE = constants.HTYPE_INSTANCE
2284 a8083063 Iustin Pop
  _OP_REQP = ["instance_name"]
2285 e873317a Guido Trotter
  REQ_BGL = False
2286 e873317a Guido Trotter
2287 e873317a Guido Trotter
  def ExpandNames(self):
2288 e873317a Guido Trotter
    self._ExpandAndLockInstance()
2289 e873317a Guido Trotter
    self.needed_locks[locking.LEVEL_NODE] = []
2290 f6d9a522 Guido Trotter
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
2291 e873317a Guido Trotter
2292 e873317a Guido Trotter
  def DeclareLocks(self, level):
2293 e873317a Guido Trotter
    if level == locking.LEVEL_NODE:
2294 e873317a Guido Trotter
      self._LockInstancesNodes()
2295 a8083063 Iustin Pop
2296 a8083063 Iustin Pop
  def BuildHooksEnv(self):
2297 a8083063 Iustin Pop
    """Build hooks env.
2298 a8083063 Iustin Pop

2299 a8083063 Iustin Pop
    This runs on master, primary and secondary nodes of the instance.
2300 a8083063 Iustin Pop

2301 a8083063 Iustin Pop
    """
2302 396e1b78 Michael Hanselmann
    env = _BuildInstanceHookEnvByObject(self.instance)
2303 d6a02168 Michael Hanselmann
    nl = ([self.cfg.GetMasterNode(), self.instance.primary_node] +
2304 a8083063 Iustin Pop
          list(self.instance.secondary_nodes))
2305 a8083063 Iustin Pop
    return env, nl, nl
2306 a8083063 Iustin Pop
2307 a8083063 Iustin Pop
  def CheckPrereq(self):
2308 a8083063 Iustin Pop
    """Check prerequisites.
2309 a8083063 Iustin Pop

2310 a8083063 Iustin Pop
    This checks that the instance is in the cluster.
2311 a8083063 Iustin Pop

2312 a8083063 Iustin Pop
    """
2313 e873317a Guido Trotter
    self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
2314 e873317a Guido Trotter
    assert self.instance is not None, \
2315 e873317a Guido Trotter
      "Cannot retrieve locked instance %s" % self.op.instance_name
2316 a8083063 Iustin Pop
2317 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
2318 a8083063 Iustin Pop
    """Shutdown the instance.
2319 a8083063 Iustin Pop

2320 a8083063 Iustin Pop
    """
2321 a8083063 Iustin Pop
    instance = self.instance
2322 a8083063 Iustin Pop
    node_current = instance.primary_node
2323 fe482621 Iustin Pop
    self.cfg.MarkInstanceDown(instance.name)
2324 a8083063 Iustin Pop
    if not rpc.call_instance_shutdown(node_current, instance):
2325 a8083063 Iustin Pop
      logger.Error("could not shutdown instance")
2326 a8083063 Iustin Pop
2327 a8083063 Iustin Pop
    _ShutdownInstanceDisks(instance, self.cfg)
2328 a8083063 Iustin Pop
2329 a8083063 Iustin Pop
2330 fe7b0351 Michael Hanselmann
class LUReinstallInstance(LogicalUnit):
2331 fe7b0351 Michael Hanselmann
  """Reinstall an instance.
2332 fe7b0351 Michael Hanselmann

2333 fe7b0351 Michael Hanselmann
  """
2334 fe7b0351 Michael Hanselmann
  HPATH = "instance-reinstall"
2335 fe7b0351 Michael Hanselmann
  HTYPE = constants.HTYPE_INSTANCE
2336 fe7b0351 Michael Hanselmann
  _OP_REQP = ["instance_name"]
2337 4e0b4d2d Guido Trotter
  REQ_BGL = False
2338 4e0b4d2d Guido Trotter
2339 4e0b4d2d Guido Trotter
  def ExpandNames(self):
2340 4e0b4d2d Guido Trotter
    self._ExpandAndLockInstance()
2341 4e0b4d2d Guido Trotter
    self.needed_locks[locking.LEVEL_NODE] = []
2342 f6d9a522 Guido Trotter
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
2343 4e0b4d2d Guido Trotter
2344 4e0b4d2d Guido Trotter
  def DeclareLocks(self, level):
2345 4e0b4d2d Guido Trotter
    if level == locking.LEVEL_NODE:
2346 4e0b4d2d Guido Trotter
      self._LockInstancesNodes()
2347 fe7b0351 Michael Hanselmann
2348 fe7b0351 Michael Hanselmann
  def BuildHooksEnv(self):
2349 fe7b0351 Michael Hanselmann
    """Build hooks env.
2350 fe7b0351 Michael Hanselmann

2351 fe7b0351 Michael Hanselmann
    This runs on master, primary and secondary nodes of the instance.
2352 fe7b0351 Michael Hanselmann

2353 fe7b0351 Michael Hanselmann
    """
2354 396e1b78 Michael Hanselmann
    env = _BuildInstanceHookEnvByObject(self.instance)
2355 d6a02168 Michael Hanselmann
    nl = ([self.cfg.GetMasterNode(), self.instance.primary_node] +
2356 fe7b0351 Michael Hanselmann
          list(self.instance.secondary_nodes))
2357 fe7b0351 Michael Hanselmann
    return env, nl, nl
2358 fe7b0351 Michael Hanselmann
2359 fe7b0351 Michael Hanselmann
  def CheckPrereq(self):
2360 fe7b0351 Michael Hanselmann
    """Check prerequisites.
2361 fe7b0351 Michael Hanselmann

2362 fe7b0351 Michael Hanselmann
    This checks that the instance is in the cluster and is not running.
2363 fe7b0351 Michael Hanselmann

2364 fe7b0351 Michael Hanselmann
    """
2365 4e0b4d2d Guido Trotter
    instance = self.cfg.GetInstanceInfo(self.op.instance_name)
2366 4e0b4d2d Guido Trotter
    assert instance is not None, \
2367 4e0b4d2d Guido Trotter
      "Cannot retrieve locked instance %s" % self.op.instance_name
2368 4e0b4d2d Guido Trotter
2369 fe7b0351 Michael Hanselmann
    if instance.disk_template == constants.DT_DISKLESS:
2370 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("Instance '%s' has no disks" %
2371 3ecf6786 Iustin Pop
                                 self.op.instance_name)
2372 fe7b0351 Michael Hanselmann
    if instance.status != "down":
2373 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("Instance '%s' is marked to be up" %
2374 3ecf6786 Iustin Pop
                                 self.op.instance_name)
2375 e69d05fd Iustin Pop
    remote_info = rpc.call_instance_info(instance.primary_node, instance.name,
2376 e69d05fd Iustin Pop
                                         instance.hypervisor)
2377 fe7b0351 Michael Hanselmann
    if remote_info:
2378 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("Instance '%s' is running on the node %s" %
2379 3ecf6786 Iustin Pop
                                 (self.op.instance_name,
2380 3ecf6786 Iustin Pop
                                  instance.primary_node))
2381 d0834de3 Michael Hanselmann
2382 d0834de3 Michael Hanselmann
    self.op.os_type = getattr(self.op, "os_type", None)
2383 d0834de3 Michael Hanselmann
    if self.op.os_type is not None:
2384 d0834de3 Michael Hanselmann
      # OS verification
2385 d0834de3 Michael Hanselmann
      pnode = self.cfg.GetNodeInfo(
2386 d0834de3 Michael Hanselmann
        self.cfg.ExpandNodeName(instance.primary_node))
2387 d0834de3 Michael Hanselmann
      if pnode is None:
2388 3ecf6786 Iustin Pop
        raise errors.OpPrereqError("Primary node '%s' is unknown" %
2389 3ecf6786 Iustin Pop
                                   instance.primary_node)
2390 00fe9e38 Guido Trotter
      os_obj = rpc.call_os_get(pnode.name, self.op.os_type)
2391 dfa96ded Guido Trotter
      if not os_obj:
2392 3ecf6786 Iustin Pop
        raise errors.OpPrereqError("OS '%s' not in supported OS list for"
2393 3ecf6786 Iustin Pop
                                   " primary node"  % self.op.os_type)
2394 d0834de3 Michael Hanselmann
2395 fe7b0351 Michael Hanselmann
    self.instance = instance
2396 fe7b0351 Michael Hanselmann
2397 fe7b0351 Michael Hanselmann
  def Exec(self, feedback_fn):
2398 fe7b0351 Michael Hanselmann
    """Reinstall the instance.
2399 fe7b0351 Michael Hanselmann

2400 fe7b0351 Michael Hanselmann
    """
2401 fe7b0351 Michael Hanselmann
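    # The disks are only activated for the duration of the OS create scripts;
    # the try/finally below shuts them down again even if the reinstall fails.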
    inst = self.instance
2402 fe7b0351 Michael Hanselmann
2403 d0834de3 Michael Hanselmann
    if self.op.os_type is not None:
2404 d0834de3 Michael Hanselmann
      feedback_fn("Changing OS to '%s'..." % self.op.os_type)
2405 d0834de3 Michael Hanselmann
      inst.os = self.op.os_type
2406 97abc79f Iustin Pop
      self.cfg.Update(inst)
2407 d0834de3 Michael Hanselmann
2408 fe7b0351 Michael Hanselmann
    _StartInstanceDisks(self.cfg, inst, None)
2409 fe7b0351 Michael Hanselmann
    try:
2410 fe7b0351 Michael Hanselmann
      feedback_fn("Running the instance OS create scripts...")
2411 fe7b0351 Michael Hanselmann
      if not rpc.call_instance_os_add(inst.primary_node, inst, "sda", "sdb"):
2412 f4bc1f2c Michael Hanselmann
        raise errors.OpExecError("Could not install OS for instance %s"
2413 f4bc1f2c Michael Hanselmann
                                 " on node %s" %
2414 3ecf6786 Iustin Pop
                                 (inst.name, inst.primary_node))
2415 fe7b0351 Michael Hanselmann
    finally:
2416 fe7b0351 Michael Hanselmann
      _ShutdownInstanceDisks(inst, self.cfg)
2417 fe7b0351 Michael Hanselmann
2418 fe7b0351 Michael Hanselmann
2419 decd5f45 Iustin Pop
class LURenameInstance(LogicalUnit):
2420 decd5f45 Iustin Pop
  """Rename an instance.
2421 decd5f45 Iustin Pop

2422 decd5f45 Iustin Pop
  """
2423 decd5f45 Iustin Pop
  HPATH = "instance-rename"
2424 decd5f45 Iustin Pop
  HTYPE = constants.HTYPE_INSTANCE
2425 decd5f45 Iustin Pop
  _OP_REQP = ["instance_name", "new_name"]
2426 decd5f45 Iustin Pop
2427 decd5f45 Iustin Pop
  def BuildHooksEnv(self):
2428 decd5f45 Iustin Pop
    """Build hooks env.
2429 decd5f45 Iustin Pop

2430 decd5f45 Iustin Pop
    This runs on master, primary and secondary nodes of the instance.
2431 decd5f45 Iustin Pop

2432 decd5f45 Iustin Pop
    """
2433 decd5f45 Iustin Pop
    env = _BuildInstanceHookEnvByObject(self.instance)
2434 decd5f45 Iustin Pop
    env["INSTANCE_NEW_NAME"] = self.op.new_name
2435 d6a02168 Michael Hanselmann
    nl = ([self.cfg.GetMasterNode(), self.instance.primary_node] +
2436 decd5f45 Iustin Pop
          list(self.instance.secondary_nodes))
2437 decd5f45 Iustin Pop
    return env, nl, nl
2438 decd5f45 Iustin Pop
2439 decd5f45 Iustin Pop
  def CheckPrereq(self):
2440 decd5f45 Iustin Pop
    """Check prerequisites.
2441 decd5f45 Iustin Pop

2442 decd5f45 Iustin Pop
    This checks that the instance is in the cluster and is not running.
2443 decd5f45 Iustin Pop

2444 decd5f45 Iustin Pop
    """
2445 decd5f45 Iustin Pop
    instance = self.cfg.GetInstanceInfo(
2446 decd5f45 Iustin Pop
      self.cfg.ExpandInstanceName(self.op.instance_name))
2447 decd5f45 Iustin Pop
    if instance is None:
2448 decd5f45 Iustin Pop
      raise errors.OpPrereqError("Instance '%s' not known" %
2449 decd5f45 Iustin Pop
                                 self.op.instance_name)
2450 decd5f45 Iustin Pop
    if instance.status != "down":
2451 decd5f45 Iustin Pop
      raise errors.OpPrereqError("Instance '%s' is marked to be up" %
2452 decd5f45 Iustin Pop
                                 self.op.instance_name)
2453 e69d05fd Iustin Pop
    remote_info = rpc.call_instance_info(instance.primary_node, instance.name,
2454 e69d05fd Iustin Pop
                                         instance.hypervisor)
2455 decd5f45 Iustin Pop
    if remote_info:
2456 decd5f45 Iustin Pop
      raise errors.OpPrereqError("Instance '%s' is running on the node %s" %
2457 decd5f45 Iustin Pop
                                 (self.op.instance_name,
2458 decd5f45 Iustin Pop
                                  instance.primary_node))
2459 decd5f45 Iustin Pop
    self.instance = instance
2460 decd5f45 Iustin Pop
2461 decd5f45 Iustin Pop
    # new name verification
2462 89e1fc26 Iustin Pop
    name_info = utils.HostInfo(self.op.new_name)
2463 decd5f45 Iustin Pop
2464 89e1fc26 Iustin Pop
    self.op.new_name = new_name = name_info.name
2465 7bde3275 Guido Trotter
    instance_list = self.cfg.GetInstanceList()
2466 7bde3275 Guido Trotter
    if new_name in instance_list:
2467 7bde3275 Guido Trotter
      raise errors.OpPrereqError("Instance '%s' is already in the cluster" %
2468 c09f363f Manuel Franceschini
                                 new_name)
2469 7bde3275 Guido Trotter
2470 decd5f45 Iustin Pop
    if not getattr(self.op, "ignore_ip", False):
2471 937f983d Guido Trotter
      if utils.TcpPing(name_info.ip, constants.DEFAULT_NODED_PORT):
2472 decd5f45 Iustin Pop
        raise errors.OpPrereqError("IP %s of instance %s already in use" %
2473 89e1fc26 Iustin Pop
                                   (name_info.ip, new_name))
2474 decd5f45 Iustin Pop
2475 decd5f45 Iustin Pop
2476 decd5f45 Iustin Pop
  def Exec(self, feedback_fn):
2477 decd5f45 Iustin Pop
    """Reinstall the instance.
2478 decd5f45 Iustin Pop

2479 decd5f45 Iustin Pop
    """
2480 decd5f45 Iustin Pop
    inst = self.instance
2481 decd5f45 Iustin Pop
    old_name = inst.name
2482 decd5f45 Iustin Pop
2483 b23c4333 Manuel Franceschini
    if inst.disk_template == constants.DT_FILE:
2484 b23c4333 Manuel Franceschini
      old_file_storage_dir = os.path.dirname(inst.disks[0].logical_id[1])
2485 b23c4333 Manuel Franceschini
2486 decd5f45 Iustin Pop
    self.cfg.RenameInstance(inst.name, self.op.new_name)
2487 74b5913f Guido Trotter
    # Change the instance lock. This is definitely safe while we hold the BGL
2488 74b5913f Guido Trotter
    self.context.glm.remove(locking.LEVEL_INSTANCE, inst.name)
2489 74b5913f Guido Trotter
    self.context.glm.add(locking.LEVEL_INSTANCE, self.op.new_name)
2490 decd5f45 Iustin Pop
2491 decd5f45 Iustin Pop
    # re-read the instance from the configuration after rename
2492 decd5f45 Iustin Pop
    inst = self.cfg.GetInstanceInfo(self.op.new_name)
2493 decd5f45 Iustin Pop
2494 b23c4333 Manuel Franceschini
    if inst.disk_template == constants.DT_FILE:
2495 b23c4333 Manuel Franceschini
      new_file_storage_dir = os.path.dirname(inst.disks[0].logical_id[1])
2496 b23c4333 Manuel Franceschini
      result = rpc.call_file_storage_dir_rename(inst.primary_node,
2497 b23c4333 Manuel Franceschini
                                                old_file_storage_dir,
2498 b23c4333 Manuel Franceschini
                                                new_file_storage_dir)
2499 b23c4333 Manuel Franceschini
2500 b23c4333 Manuel Franceschini
      if not result:
2501 b23c4333 Manuel Franceschini
        raise errors.OpExecError("Could not connect to node '%s' to rename"
2502 b23c4333 Manuel Franceschini
                                 " directory '%s' to '%s' (but the instance"
2503 b23c4333 Manuel Franceschini
                                 " has been renamed in Ganeti)" % (
2504 b23c4333 Manuel Franceschini
                                 inst.primary_node, old_file_storage_dir,
2505 b23c4333 Manuel Franceschini
                                 new_file_storage_dir))
2506 b23c4333 Manuel Franceschini
2507 b23c4333 Manuel Franceschini
      if not result[0]:
2508 b23c4333 Manuel Franceschini
        raise errors.OpExecError("Could not rename directory '%s' to '%s'"
2509 b23c4333 Manuel Franceschini
                                 " (but the instance has been renamed in"
2510 b23c4333 Manuel Franceschini
                                 " Ganeti)" % (old_file_storage_dir,
2511 b23c4333 Manuel Franceschini
                                               new_file_storage_dir))
2512 b23c4333 Manuel Franceschini
2513 decd5f45 Iustin Pop
    _StartInstanceDisks(self.cfg, inst, None)
2514 decd5f45 Iustin Pop
    try:
2515 decd5f45 Iustin Pop
      if not rpc.call_instance_run_rename(inst.primary_node, inst, old_name,
2516 decd5f45 Iustin Pop
                                          "sda", "sdb"):
2517 6291574d Alexander Schreiber
        msg = ("Could not run OS rename script for instance %s on node %s"
2518 6291574d Alexander Schreiber
               " (but the instance has been renamed in Ganeti)" %
2519 decd5f45 Iustin Pop
               (inst.name, inst.primary_node))
2520 decd5f45 Iustin Pop
        logger.Error(msg)
2521 decd5f45 Iustin Pop
    finally:
2522 decd5f45 Iustin Pop
      _ShutdownInstanceDisks(inst, self.cfg)
2523 decd5f45 Iustin Pop
2524 decd5f45 Iustin Pop
2525 a8083063 Iustin Pop
class LURemoveInstance(LogicalUnit):
2526 a8083063 Iustin Pop
  """Remove an instance.
2527 a8083063 Iustin Pop

2528 a8083063 Iustin Pop
  """
2529 a8083063 Iustin Pop
  HPATH = "instance-remove"
2530 a8083063 Iustin Pop
  HTYPE = constants.HTYPE_INSTANCE
2531 5c54b832 Iustin Pop
  _OP_REQP = ["instance_name", "ignore_failures"]
2532 cf472233 Guido Trotter
  REQ_BGL = False
2533 cf472233 Guido Trotter
2534 cf472233 Guido Trotter
  def ExpandNames(self):
2535 cf472233 Guido Trotter
    self._ExpandAndLockInstance()
2536 cf472233 Guido Trotter
    self.needed_locks[locking.LEVEL_NODE] = []
2537 cf472233 Guido Trotter
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
2538 cf472233 Guido Trotter
2539 cf472233 Guido Trotter
  def DeclareLocks(self, level):
2540 cf472233 Guido Trotter
    if level == locking.LEVEL_NODE:
2541 cf472233 Guido Trotter
      self._LockInstancesNodes()
2542 a8083063 Iustin Pop
2543 a8083063 Iustin Pop
  def BuildHooksEnv(self):
2544 a8083063 Iustin Pop
    """Build hooks env.
2545 a8083063 Iustin Pop

2546 a8083063 Iustin Pop
    This runs on master, primary and secondary nodes of the instance.
2547 a8083063 Iustin Pop

2548 a8083063 Iustin Pop
    """
2549 396e1b78 Michael Hanselmann
    env = _BuildInstanceHookEnvByObject(self.instance)
2550 d6a02168 Michael Hanselmann
    nl = [self.cfg.GetMasterNode()]
2551 a8083063 Iustin Pop
    return env, nl, nl
2552 a8083063 Iustin Pop
2553 a8083063 Iustin Pop
  def CheckPrereq(self):
2554 a8083063 Iustin Pop
    """Check prerequisites.
2555 a8083063 Iustin Pop

2556 a8083063 Iustin Pop
    This checks that the instance is in the cluster.
2557 a8083063 Iustin Pop

2558 a8083063 Iustin Pop
    """
2559 cf472233 Guido Trotter
    self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
2560 cf472233 Guido Trotter
    assert self.instance is not None, \
2561 cf472233 Guido Trotter
      "Cannot retrieve locked instance %s" % self.op.instance_name
2562 a8083063 Iustin Pop
2563 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
2564 a8083063 Iustin Pop
    """Remove the instance.
2565 a8083063 Iustin Pop

2566 a8083063 Iustin Pop
    """
2567 a8083063 Iustin Pop
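    # Both failure points below (instance shutdown and disk removal) abort the
    # operation unless ignore_failures was given, in which case they are only
    # reported as warnings and the instance is removed from the config anyway.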
    instance = self.instance
2568 a8083063 Iustin Pop
    logger.Info("shutting down instance %s on node %s" %
2569 a8083063 Iustin Pop
                (instance.name, instance.primary_node))
2570 a8083063 Iustin Pop
2571 a8083063 Iustin Pop
    if not rpc.call_instance_shutdown(instance.primary_node, instance):
2572 1d67656e Iustin Pop
      if self.op.ignore_failures:
2573 1d67656e Iustin Pop
        feedback_fn("Warning: can't shutdown instance")
2574 1d67656e Iustin Pop
      else:
2575 1d67656e Iustin Pop
        raise errors.OpExecError("Could not shutdown instance %s on node %s" %
2576 1d67656e Iustin Pop
                                 (instance.name, instance.primary_node))
2577 a8083063 Iustin Pop
2578 a8083063 Iustin Pop
    logger.Info("removing block devices for instance %s" % instance.name)
2579 a8083063 Iustin Pop
2580 1d67656e Iustin Pop
    if not _RemoveDisks(instance, self.cfg):
2581 1d67656e Iustin Pop
      if self.op.ignore_failures:
2582 1d67656e Iustin Pop
        feedback_fn("Warning: can't remove instance's disks")
2583 1d67656e Iustin Pop
      else:
2584 1d67656e Iustin Pop
        raise errors.OpExecError("Can't remove instance's disks")
2585 a8083063 Iustin Pop
2586 a8083063 Iustin Pop
    logger.Info("removing instance %s out of cluster config" % instance.name)
2587 a8083063 Iustin Pop
2588 a8083063 Iustin Pop
    self.cfg.RemoveInstance(instance.name)
2589 cf472233 Guido Trotter
    self.remove_locks[locking.LEVEL_INSTANCE] = instance.name
2590 a8083063 Iustin Pop
2591 a8083063 Iustin Pop
2592 a8083063 Iustin Pop
class LUQueryInstances(NoHooksLU):
2593 a8083063 Iustin Pop
  """Logical unit for querying instances.
2594 a8083063 Iustin Pop

2595 a8083063 Iustin Pop
  """
2596 069dcc86 Iustin Pop
  _OP_REQP = ["output_fields", "names"]
2597 7eb9d8f7 Guido Trotter
  REQ_BGL = False
2598 a8083063 Iustin Pop
2599 7eb9d8f7 Guido Trotter
  def ExpandNames(self):
2600 d8052456 Iustin Pop
    self.dynamic_fields = frozenset(["oper_state", "oper_ram", "status"])
2601 57a2fb91 Iustin Pop
    self.static_fields = frozenset([
2602 57a2fb91 Iustin Pop
      "name", "os", "pnode", "snodes",
2603 57a2fb91 Iustin Pop
      "admin_state", "admin_ram",
2604 57a2fb91 Iustin Pop
      "disk_template", "ip", "mac", "bridge",
2605 57a2fb91 Iustin Pop
      "sda_size", "sdb_size", "vcpus", "tags",
2606 57a2fb91 Iustin Pop
      "network_port", "kernel_path", "initrd_path",
2607 57a2fb91 Iustin Pop
      "hvm_boot_order", "hvm_acpi", "hvm_pae",
2608 57a2fb91 Iustin Pop
      "hvm_cdrom_image_path", "hvm_nic_type",
2609 57a2fb91 Iustin Pop
      "hvm_disk_type", "vnc_bind_address",
2610 e69d05fd Iustin Pop
      "serial_no", "hypervisor",
2611 57a2fb91 Iustin Pop
      ])
2612 57a2fb91 Iustin Pop
    _CheckOutputFields(static=self.static_fields,
2613 dcb93971 Michael Hanselmann
                       dynamic=self.dynamic_fields,
2614 dcb93971 Michael Hanselmann
                       selected=self.op.output_fields)
2615 a8083063 Iustin Pop
2616 7eb9d8f7 Guido Trotter
    self.needed_locks = {}
2617 7eb9d8f7 Guido Trotter
    self.share_locks[locking.LEVEL_INSTANCE] = 1
2618 7eb9d8f7 Guido Trotter
    self.share_locks[locking.LEVEL_NODE] = 1
2619 7eb9d8f7 Guido Trotter
2620 57a2fb91 Iustin Pop
    if self.op.names:
2621 57a2fb91 Iustin Pop
      self.wanted = _GetWantedInstances(self, self.op.names)
2622 7eb9d8f7 Guido Trotter
    else:
2623 57a2fb91 Iustin Pop
      self.wanted = locking.ALL_SET
2624 7eb9d8f7 Guido Trotter
2625 57a2fb91 Iustin Pop
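    # Locks are only needed when at least one requested field is dynamic,
    # i.e. has to be read from the live nodes; queries for purely static
    # fields are answered from the configuration without locking.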
    self.do_locking = not self.static_fields.issuperset(self.op.output_fields)
2626 57a2fb91 Iustin Pop
    if self.do_locking:
2627 57a2fb91 Iustin Pop
      self.needed_locks[locking.LEVEL_INSTANCE] = self.wanted
2628 57a2fb91 Iustin Pop
      self.needed_locks[locking.LEVEL_NODE] = []
2629 57a2fb91 Iustin Pop
      self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
2630 7eb9d8f7 Guido Trotter
2631 7eb9d8f7 Guido Trotter
  def DeclareLocks(self, level):
2632 57a2fb91 Iustin Pop
    if level == locking.LEVEL_NODE and self.do_locking:
2633 7eb9d8f7 Guido Trotter
      self._LockInstancesNodes()
2634 7eb9d8f7 Guido Trotter
2635 7eb9d8f7 Guido Trotter
  def CheckPrereq(self):
2636 7eb9d8f7 Guido Trotter
    """Check prerequisites.
2637 7eb9d8f7 Guido Trotter

2638 7eb9d8f7 Guido Trotter
    """
2639 57a2fb91 Iustin Pop
    pass
2640 069dcc86 Iustin Pop
2641 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
2642 a8083063 Iustin Pop
    """Computes the list of nodes and their attributes.
2643 a8083063 Iustin Pop

2644 a8083063 Iustin Pop
    """
2645 57a2fb91 Iustin Pop
    all_info = self.cfg.GetAllInstancesInfo()
2646 57a2fb91 Iustin Pop
    if self.do_locking:
2647 57a2fb91 Iustin Pop
      instance_names = self.acquired_locks[locking.LEVEL_INSTANCE]
2648 3fa93523 Guido Trotter
    elif self.wanted != locking.ALL_SET:
2649 3fa93523 Guido Trotter
      instance_names = self.wanted
2650 3fa93523 Guido Trotter
      missing = set(instance_names).difference(all_info.keys())
2651 3fa93523 Guido Trotter
      if missing:
2652 7b3a8fb5 Iustin Pop
        raise errors.OpExecError(
2653 3fa93523 Guido Trotter
          "Some instances were removed before retrieving their data: %s"
2654 3fa93523 Guido Trotter
          % missing)
2655 57a2fb91 Iustin Pop
    else:
2656 57a2fb91 Iustin Pop
      instance_names = all_info.keys()
2657 57a2fb91 Iustin Pop
    instance_list = [all_info[iname] for iname in instance_names]
2658 a8083063 Iustin Pop
2659 a8083063 Iustin Pop
    # begin data gathering
2660 a8083063 Iustin Pop
2661 a8083063 Iustin Pop
    nodes = frozenset([inst.primary_node for inst in instance_list])
2662 e69d05fd Iustin Pop
    hv_list = list(set([inst.hypervisor for inst in instance_list]))
2663 a8083063 Iustin Pop
2664 a8083063 Iustin Pop
    bad_nodes = []
2665 a8083063 Iustin Pop
    if self.dynamic_fields.intersection(self.op.output_fields):
2666 a8083063 Iustin Pop
      live_data = {}
2667 e69d05fd Iustin Pop
      node_data = rpc.call_all_instances_info(nodes, hv_list)
2668 a8083063 Iustin Pop
      for name in nodes:
2669 a8083063 Iustin Pop
        result = node_data[name]
2670 a8083063 Iustin Pop
        if result:
2671 a8083063 Iustin Pop
          live_data.update(result)
2672 a8083063 Iustin Pop
        elif result == False:
2673 a8083063 Iustin Pop
          bad_nodes.append(name)
2674 a8083063 Iustin Pop
        # else no instance is alive
2675 a8083063 Iustin Pop
    else:
2676 a8083063 Iustin Pop
      live_data = dict([(name, {}) for name in instance_names])
2677 a8083063 Iustin Pop
2678 a8083063 Iustin Pop
    # end data gathering
2679 a8083063 Iustin Pop
2680 a8083063 Iustin Pop
    output = []
2681 a8083063 Iustin Pop
    for instance in instance_list:
2682 a8083063 Iustin Pop
      iout = []
2683 a8083063 Iustin Pop
      for field in self.op.output_fields:
2684 a8083063 Iustin Pop
        if field == "name":
2685 a8083063 Iustin Pop
          val = instance.name
2686 a8083063 Iustin Pop
        elif field == "os":
2687 a8083063 Iustin Pop
          val = instance.os
2688 a8083063 Iustin Pop
        elif field == "pnode":
2689 a8083063 Iustin Pop
          val = instance.primary_node
2690 a8083063 Iustin Pop
        elif field == "snodes":
2691 8a23d2d3 Iustin Pop
          val = list(instance.secondary_nodes)
2692 a8083063 Iustin Pop
        elif field == "admin_state":
2693 8a23d2d3 Iustin Pop
          val = (instance.status != "down")
2694 a8083063 Iustin Pop
        elif field == "oper_state":
2695 a8083063 Iustin Pop
          if instance.primary_node in bad_nodes:
2696 8a23d2d3 Iustin Pop
            val = None
2697 a8083063 Iustin Pop
          else:
2698 8a23d2d3 Iustin Pop
            val = bool(live_data.get(instance.name))
2699 d8052456 Iustin Pop
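        # The "status" field combines the admin and the live state:
        # running / ERROR_up (running but marked down), ADMIN_down /
        # ERROR_down (marked up but not running), and ERROR_nodedown when
        # the primary node cannot be contacted.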
        elif field == "status":
2700 d8052456 Iustin Pop
          if instance.primary_node in bad_nodes:
2701 d8052456 Iustin Pop
            val = "ERROR_nodedown"
2702 d8052456 Iustin Pop
          else:
2703 d8052456 Iustin Pop
            running = bool(live_data.get(instance.name))
2704 d8052456 Iustin Pop
            if running:
2705 d8052456 Iustin Pop
              if instance.status != "down":
2706 d8052456 Iustin Pop
                val = "running"
2707 d8052456 Iustin Pop
              else:
2708 d8052456 Iustin Pop
                val = "ERROR_up"
2709 d8052456 Iustin Pop
            else:
2710 d8052456 Iustin Pop
              if instance.status != "down":
2711 d8052456 Iustin Pop
                val = "ERROR_down"
2712 d8052456 Iustin Pop
              else:
2713 d8052456 Iustin Pop
                val = "ADMIN_down"
2714 a8083063 Iustin Pop
        elif field == "admin_ram":
2715 a8083063 Iustin Pop
          val = instance.memory
2716 a8083063 Iustin Pop
        elif field == "oper_ram":
2717 a8083063 Iustin Pop
          if instance.primary_node in bad_nodes:
2718 8a23d2d3 Iustin Pop
            val = None
2719 a8083063 Iustin Pop
          elif instance.name in live_data:
2720 a8083063 Iustin Pop
            val = live_data[instance.name].get("memory", "?")
2721 a8083063 Iustin Pop
          else:
2722 a8083063 Iustin Pop
            val = "-"
2723 a8083063 Iustin Pop
        elif field == "disk_template":
2724 a8083063 Iustin Pop
          val = instance.disk_template
2725 a8083063 Iustin Pop
        elif field == "ip":
2726 a8083063 Iustin Pop
          val = instance.nics[0].ip
2727 a8083063 Iustin Pop
        elif field == "bridge":
2728 a8083063 Iustin Pop
          val = instance.nics[0].bridge
2729 a8083063 Iustin Pop
        elif field == "mac":
2730 a8083063 Iustin Pop
          val = instance.nics[0].mac
2731 644eeef9 Iustin Pop
        elif field == "sda_size" or field == "sdb_size":
2732 644eeef9 Iustin Pop
          disk = instance.FindDisk(field[:3])
2733 644eeef9 Iustin Pop
          if disk is None:
2734 8a23d2d3 Iustin Pop
            val = None
2735 644eeef9 Iustin Pop
          else:
2736 644eeef9 Iustin Pop
            val = disk.size
2737 d6d415e8 Iustin Pop
        elif field == "vcpus":
2738 d6d415e8 Iustin Pop
          val = instance.vcpus
2739 130a6a6f Iustin Pop
        elif field == "tags":
2740 130a6a6f Iustin Pop
          val = list(instance.GetTags())
2741 38d7239a Iustin Pop
        elif field == "serial_no":
2742 38d7239a Iustin Pop
          val = instance.serial_no
2743 3fb1e1c5 Alexander Schreiber
        elif field in ("network_port", "kernel_path", "initrd_path",
2744 3fb1e1c5 Alexander Schreiber
                       "hvm_boot_order", "hvm_acpi", "hvm_pae",
2745 3fb1e1c5 Alexander Schreiber
                       "hvm_cdrom_image_path", "hvm_nic_type",
2746 3fb1e1c5 Alexander Schreiber
                       "hvm_disk_type", "vnc_bind_address"):
2747 3fb1e1c5 Alexander Schreiber
          val = getattr(instance, field, None)
2748 3fb1e1c5 Alexander Schreiber
          if val is not None:
2749 3fb1e1c5 Alexander Schreiber
            pass
2750 3fb1e1c5 Alexander Schreiber
          elif field in ("hvm_nic_type", "hvm_disk_type",
2751 3fb1e1c5 Alexander Schreiber
                         "kernel_path", "initrd_path"):
2752 3fb1e1c5 Alexander Schreiber
            val = "default"
2753 3fb1e1c5 Alexander Schreiber
          else:
2754 3fb1e1c5 Alexander Schreiber
            val = "-"
2755 e69d05fd Iustin Pop
        elif field == "hypervisor":
2756 e69d05fd Iustin Pop
          val = instance.hypervisor
2757 a8083063 Iustin Pop
        else:
2758 3ecf6786 Iustin Pop
          raise errors.ParameterError(field)
2759 a8083063 Iustin Pop
        iout.append(val)
2760 a8083063 Iustin Pop
      output.append(iout)
2761 a8083063 Iustin Pop
2762 a8083063 Iustin Pop
    return output
2763 a8083063 Iustin Pop
2764 a8083063 Iustin Pop
2765 a8083063 Iustin Pop
class LUFailoverInstance(LogicalUnit):
2766 a8083063 Iustin Pop
  """Failover an instance.
2767 a8083063 Iustin Pop

2768 a8083063 Iustin Pop
  """
2769 a8083063 Iustin Pop
  HPATH = "instance-failover"
2770 a8083063 Iustin Pop
  HTYPE = constants.HTYPE_INSTANCE
2771 a8083063 Iustin Pop
  _OP_REQP = ["instance_name", "ignore_consistency"]
2772 c9e5c064 Guido Trotter
  REQ_BGL = False
2773 c9e5c064 Guido Trotter
2774 c9e5c064 Guido Trotter
  def ExpandNames(self):
2775 c9e5c064 Guido Trotter
    self._ExpandAndLockInstance()
2776 c9e5c064 Guido Trotter
    self.needed_locks[locking.LEVEL_NODE] = []
2777 f6d9a522 Guido Trotter
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
2778 c9e5c064 Guido Trotter
2779 c9e5c064 Guido Trotter
  def DeclareLocks(self, level):
2780 c9e5c064 Guido Trotter
    if level == locking.LEVEL_NODE:
2781 c9e5c064 Guido Trotter
      self._LockInstancesNodes()
2782 a8083063 Iustin Pop
2783 a8083063 Iustin Pop
  def BuildHooksEnv(self):
2784 a8083063 Iustin Pop
    """Build hooks env.
2785 a8083063 Iustin Pop

2786 a8083063 Iustin Pop
    This runs on master, primary and secondary nodes of the instance.
2787 a8083063 Iustin Pop

2788 a8083063 Iustin Pop
    """
2789 a8083063 Iustin Pop
    env = {
2790 a8083063 Iustin Pop
      "IGNORE_CONSISTENCY": self.op.ignore_consistency,
2791 a8083063 Iustin Pop
      }
2792 396e1b78 Michael Hanselmann
    env.update(_BuildInstanceHookEnvByObject(self.instance))
2793 d6a02168 Michael Hanselmann
    nl = [self.cfg.GetMasterNode()] + list(self.instance.secondary_nodes)
2794 a8083063 Iustin Pop
    return env, nl, nl
2795 a8083063 Iustin Pop
2796 a8083063 Iustin Pop
  def CheckPrereq(self):
2797 a8083063 Iustin Pop
    """Check prerequisites.
2798 a8083063 Iustin Pop

2799 a8083063 Iustin Pop
    This checks that the instance is in the cluster.
2800 a8083063 Iustin Pop

2801 a8083063 Iustin Pop
    """
2802 c9e5c064 Guido Trotter
    self.instance = instance = self.cfg.GetInstanceInfo(self.op.instance_name)
2803 c9e5c064 Guido Trotter
    assert self.instance is not None, \
2804 c9e5c064 Guido Trotter
      "Cannot retrieve locked instance %s" % self.op.instance_name
2805 a8083063 Iustin Pop
2806 a1f445d3 Iustin Pop
    if instance.disk_template not in constants.DTS_NET_MIRROR:
2807 2a710df1 Michael Hanselmann
      raise errors.OpPrereqError("Instance's disk layout is not"
2808 a1f445d3 Iustin Pop
                                 " network mirrored, cannot failover.")
2809 2a710df1 Michael Hanselmann
2810 2a710df1 Michael Hanselmann
    secondary_nodes = instance.secondary_nodes
2811 2a710df1 Michael Hanselmann
    if not secondary_nodes:
2812 2a710df1 Michael Hanselmann
      raise errors.ProgrammerError("no secondary node but using "
2813 abdf0113 Iustin Pop
                                   "a mirrored disk template")
2814 2a710df1 Michael Hanselmann
2815 2a710df1 Michael Hanselmann
    target_node = secondary_nodes[0]
2816 d4f16fd9 Iustin Pop
    # check memory requirements on the secondary node
2817 d4f16fd9 Iustin Pop
    _CheckNodeFreeMemory(self.cfg, target_node, "failing over instance %s" %
2818 e69d05fd Iustin Pop
                         instance.name, instance.memory,
2819 e69d05fd Iustin Pop
                         instance.hypervisor)
2820 3a7c308e Guido Trotter
2821 a8083063 Iustin Pop
    # check bridge existence
2822 a8083063 Iustin Pop
    brlist = [nic.bridge for nic in instance.nics]
2823 50ff9a7a Iustin Pop
    if not rpc.call_bridges_exist(target_node, brlist):
2824 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("One or more target bridges %s does not"
2825 3ecf6786 Iustin Pop
                                 " exist on destination node '%s'" %
2826 50ff9a7a Iustin Pop
                                 (brlist, target_node))
2827 a8083063 Iustin Pop
2828 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
2829 a8083063 Iustin Pop
    """Failover an instance.
2830 a8083063 Iustin Pop

2831 a8083063 Iustin Pop
    The failover is done by shutting it down on its present node and
2832 a8083063 Iustin Pop
    starting it on the secondary.
2833 a8083063 Iustin Pop

2834 a8083063 Iustin Pop
    """
2835 a8083063 Iustin Pop
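    # Outline (added): check disk consistency on the target, shut the instance
    # down on the source (ignore_consistency allows proceeding even if that
    # fails), deactivate its disks, flip primary_node in the configuration,
    # and finally reactivate the disks and restart it on the target if the
    # instance was marked up.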
    instance = self.instance
2836 a8083063 Iustin Pop
2837 a8083063 Iustin Pop
    source_node = instance.primary_node
2838 a8083063 Iustin Pop
    target_node = instance.secondary_nodes[0]
2839 a8083063 Iustin Pop
2840 a8083063 Iustin Pop
    feedback_fn("* checking disk consistency between source and target")
2841 a8083063 Iustin Pop
    for dev in instance.disks:
2842 abdf0113 Iustin Pop
      # for drbd, these are drbd over lvm
2843 a8083063 Iustin Pop
      if not _CheckDiskConsistency(self.cfg, dev, target_node, False):
2844 a0aaa0d0 Guido Trotter
        if instance.status == "up" and not self.op.ignore_consistency:
2845 3ecf6786 Iustin Pop
          raise errors.OpExecError("Disk %s is degraded on target node,"
2846 3ecf6786 Iustin Pop
                                   " aborting failover." % dev.iv_name)
2847 a8083063 Iustin Pop
2848 a8083063 Iustin Pop
    feedback_fn("* shutting down instance on source node")
2849 a8083063 Iustin Pop
    logger.Info("Shutting down instance %s on node %s" %
2850 a8083063 Iustin Pop
                (instance.name, source_node))
2851 a8083063 Iustin Pop
2852 a8083063 Iustin Pop
    if not rpc.call_instance_shutdown(source_node, instance):
2853 24a40d57 Iustin Pop
      if self.op.ignore_consistency:
2854 24a40d57 Iustin Pop
        logger.Error("Could not shutdown instance %s on node %s. Proceeding"
2855 24a40d57 Iustin Pop
                     " anyway. Please make sure node %s is down"  %
2856 24a40d57 Iustin Pop
                     (instance.name, source_node, source_node))
2857 24a40d57 Iustin Pop
      else:
2858 24a40d57 Iustin Pop
        raise errors.OpExecError("Could not shutdown instance %s on node %s" %
2859 24a40d57 Iustin Pop
                                 (instance.name, source_node))
2860 a8083063 Iustin Pop
2861 a8083063 Iustin Pop
    feedback_fn("* deactivating the instance's disks on source node")
2862 a8083063 Iustin Pop
    if not _ShutdownInstanceDisks(instance, self.cfg, ignore_primary=True):
2863 3ecf6786 Iustin Pop
      raise errors.OpExecError("Can't shut down the instance's disks.")
2864 a8083063 Iustin Pop
2865 a8083063 Iustin Pop
    instance.primary_node = target_node
2866 a8083063 Iustin Pop
    # distribute new instance config to the other nodes
2867 b6102dab Guido Trotter
    self.cfg.Update(instance)
2868 a8083063 Iustin Pop
2869 12a0cfbe Guido Trotter
    # Only start the instance if it's marked as up
2870 12a0cfbe Guido Trotter
    if instance.status == "up":
2871 12a0cfbe Guido Trotter
      feedback_fn("* activating the instance's disks on target node")
2872 12a0cfbe Guido Trotter
      logger.Info("Starting instance %s on node %s" %
2873 12a0cfbe Guido Trotter
                  (instance.name, target_node))
2874 12a0cfbe Guido Trotter
2875 12a0cfbe Guido Trotter
      disks_ok, dummy = _AssembleInstanceDisks(instance, self.cfg,
2876 12a0cfbe Guido Trotter
                                               ignore_secondaries=True)
2877 12a0cfbe Guido Trotter
      if not disks_ok:
2878 12a0cfbe Guido Trotter
        _ShutdownInstanceDisks(instance, self.cfg)
2879 12a0cfbe Guido Trotter
        raise errors.OpExecError("Can't activate the instance's disks")
2880 a8083063 Iustin Pop
2881 12a0cfbe Guido Trotter
      feedback_fn("* starting the instance on the target node")
2882 12a0cfbe Guido Trotter
      if not rpc.call_instance_start(target_node, instance, None):
2883 12a0cfbe Guido Trotter
        _ShutdownInstanceDisks(instance, self.cfg)
2884 12a0cfbe Guido Trotter
        raise errors.OpExecError("Could not start instance %s on node %s." %
2885 12a0cfbe Guido Trotter
                                 (instance.name, target_node))
2886 a8083063 Iustin Pop
2887 a8083063 Iustin Pop
2888 3f78eef2 Iustin Pop
def _CreateBlockDevOnPrimary(cfg, node, instance, device, info):
2889 a8083063 Iustin Pop
  """Create a tree of block devices on the primary node.
2890 a8083063 Iustin Pop

2891 a8083063 Iustin Pop
  This always creates all devices.
2892 a8083063 Iustin Pop

2893 a8083063 Iustin Pop
  """
2894 a8083063 Iustin Pop
  if device.children:
2895 a8083063 Iustin Pop
    for child in device.children:
2896 3f78eef2 Iustin Pop
      if not _CreateBlockDevOnPrimary(cfg, node, instance, child, info):
2897 a8083063 Iustin Pop
        return False
2898 a8083063 Iustin Pop
2899 a8083063 Iustin Pop
  cfg.SetDiskID(device, node)
2900 3f78eef2 Iustin Pop
  new_id = rpc.call_blockdev_create(node, device, device.size,
2901 3f78eef2 Iustin Pop
                                    instance.name, True, info)
2902 a8083063 Iustin Pop
  if not new_id:
2903 a8083063 Iustin Pop
    return False
2904 a8083063 Iustin Pop
  if device.physical_id is None:
2905 a8083063 Iustin Pop
    device.physical_id = new_id
2906 a8083063 Iustin Pop
  return True
2907 a8083063 Iustin Pop
2908 a8083063 Iustin Pop
2909 3f78eef2 Iustin Pop
def _CreateBlockDevOnSecondary(cfg, node, instance, device, force, info):
2910 a8083063 Iustin Pop
  """Create a tree of block devices on a secondary node.
2911 a8083063 Iustin Pop

2912 a8083063 Iustin Pop
  If this device type has to be created on secondaries, create it and
2913 a8083063 Iustin Pop
  all its children.
2914 a8083063 Iustin Pop

2915 a8083063 Iustin Pop
  If not, just recurse to children keeping the same 'force' value.
2916 a8083063 Iustin Pop

2917 a8083063 Iustin Pop
  """
2918 a8083063 Iustin Pop
  if device.CreateOnSecondary():
2919 a8083063 Iustin Pop
    force = True
2920 a8083063 Iustin Pop
  if device.children:
2921 a8083063 Iustin Pop
    for child in device.children:
2922 3f78eef2 Iustin Pop
      if not _CreateBlockDevOnSecondary(cfg, node, instance,
2923 3f78eef2 Iustin Pop
                                        child, force, info):
2924 a8083063 Iustin Pop
        return False
2925 a8083063 Iustin Pop
2926 a8083063 Iustin Pop
  if not force:
2927 a8083063 Iustin Pop
    return True
2928 a8083063 Iustin Pop
  cfg.SetDiskID(device, node)
2929 3f78eef2 Iustin Pop
  new_id = rpc.call_blockdev_create(node, device, device.size,
2930 3f78eef2 Iustin Pop
                                    instance.name, False, info)
2931 a8083063 Iustin Pop
  if not new_id:
2932 a8083063 Iustin Pop
    return False
2933 a8083063 Iustin Pop
  if device.physical_id is None:
2934 a8083063 Iustin Pop
    device.physical_id = new_id
2935 a8083063 Iustin Pop
  return True
2936 a8083063 Iustin Pop
2937 a8083063 Iustin Pop
2938 923b1523 Iustin Pop
def _GenerateUniqueNames(cfg, exts):
2939 923b1523 Iustin Pop
  """Generate a suitable LV name.
2940 923b1523 Iustin Pop

2941 923b1523 Iustin Pop
  This will generate a logical volume name for each of the given extensions.
2942 923b1523 Iustin Pop

2943 923b1523 Iustin Pop
  """
2944 923b1523 Iustin Pop
  results = []
2945 923b1523 Iustin Pop
  for val in exts:
2946 923b1523 Iustin Pop
    new_id = cfg.GenerateUniqueID()
2947 923b1523 Iustin Pop
    results.append("%s%s" % (new_id, val))
2948 923b1523 Iustin Pop
  return results
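# Illustrative usage sketch (not part of the original code): each extension
# gets its own freshly generated unique ID, so a call such as
#
#   names = _GenerateUniqueNames(cfg, [".sda", ".sdb"])
#
# yields something like ["<id1>.sda", "<id2>.sdb"], which are later used as
# the logical volume names for an instance's disks.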
2949 923b1523 Iustin Pop
2950 923b1523 Iustin Pop
2951 ffa1c0dc Iustin Pop
def _GenerateDRBD8Branch(cfg, primary, secondary, size, names, iv_name,
2952 ffa1c0dc Iustin Pop
                         p_minor, s_minor):
2953 a1f445d3 Iustin Pop
  """Generate a drbd8 device complete with its children.
2954 a1f445d3 Iustin Pop

2955 a1f445d3 Iustin Pop
  """
2956 a1f445d3 Iustin Pop
  port = cfg.AllocatePort()
2957 a1f445d3 Iustin Pop
  vgname = cfg.GetVGName()
2958 f9518d38 Iustin Pop
  shared_secret = cfg.GenerateDRBDSecret()
2959 a1f445d3 Iustin Pop
  dev_data = objects.Disk(dev_type=constants.LD_LV, size=size,
2960 a1f445d3 Iustin Pop
                          logical_id=(vgname, names[0]))
2961 a1f445d3 Iustin Pop
  dev_meta = objects.Disk(dev_type=constants.LD_LV, size=128,
2962 a1f445d3 Iustin Pop
                          logical_id=(vgname, names[1]))
2963 a1f445d3 Iustin Pop
  drbd_dev = objects.Disk(dev_type=constants.LD_DRBD8, size=size,
2964 ffa1c0dc Iustin Pop
                          logical_id=(primary, secondary, port,
2965 f9518d38 Iustin Pop
                                      p_minor, s_minor,
2966 f9518d38 Iustin Pop
                                      shared_secret),
2967 ffa1c0dc Iustin Pop
                          children=[dev_data, dev_meta],
2968 a1f445d3 Iustin Pop
                          iv_name=iv_name)
2969 a1f445d3 Iustin Pop
  return drbd_dev
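# Rough shape of the device returned above (illustrative only): a single DRBD8
# disk whose logical_id carries both node names, the allocated port, the two
# minors and the shared secret, backed by two LVs as children:
#
#   drbd8, <size> MB
#     +- lv <vgname>/<names[0]>  (data, <size> MB)
#     +- lv <vgname>/<names[1]>  (meta, 128 MB)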
2970 a1f445d3 Iustin Pop
2971 7c0d6283 Michael Hanselmann
2972 923b1523 Iustin Pop
def _GenerateDiskTemplate(cfg, template_name,
2973 a8083063 Iustin Pop
                          instance_name, primary_node,
2974 0f1a06e3 Manuel Franceschini
                          secondary_nodes, disk_sz, swap_sz,
2975 0f1a06e3 Manuel Franceschini
                          file_storage_dir, file_driver):
2976 a8083063 Iustin Pop
  """Generate the entire disk layout for a given template type.
2977 a8083063 Iustin Pop

2978 a8083063 Iustin Pop
  """
2979 a8083063 Iustin Pop
  #TODO: compute space requirements
2980 a8083063 Iustin Pop
2981 923b1523 Iustin Pop
  vgname = cfg.GetVGName()
2982 3517d9b9 Manuel Franceschini
  if template_name == constants.DT_DISKLESS:
2983 a8083063 Iustin Pop
    disks = []
2984 3517d9b9 Manuel Franceschini
  elif template_name == constants.DT_PLAIN:
2985 a8083063 Iustin Pop
    if len(secondary_nodes) != 0:
2986 a8083063 Iustin Pop
      raise errors.ProgrammerError("Wrong template configuration")
2987 923b1523 Iustin Pop
2988 923b1523 Iustin Pop
    names = _GenerateUniqueNames(cfg, [".sda", ".sdb"])
2989 fe96220b Iustin Pop
    sda_dev = objects.Disk(dev_type=constants.LD_LV, size=disk_sz,
2990 923b1523 Iustin Pop
                           logical_id=(vgname, names[0]),
2991 a8083063 Iustin Pop
                           iv_name = "sda")
2992 fe96220b Iustin Pop
    sdb_dev = objects.Disk(dev_type=constants.LD_LV, size=swap_sz,
2993 923b1523 Iustin Pop
                           logical_id=(vgname, names[1]),
2994 a8083063 Iustin Pop
                           iv_name = "sdb")
2995 a8083063 Iustin Pop
    disks = [sda_dev, sdb_dev]
2996 a1f445d3 Iustin Pop
  elif template_name == constants.DT_DRBD8:
2997 a1f445d3 Iustin Pop
    if len(secondary_nodes) != 1:
2998 a1f445d3 Iustin Pop
      raise errors.ProgrammerError("Wrong template configuration")
2999 a1f445d3 Iustin Pop
    remote_node = secondary_nodes[0]
3000 ffa1c0dc Iustin Pop
    (minor_pa, minor_pb,
3001 a1578d63 Iustin Pop
     minor_sa, minor_sb) = cfg.AllocateDRBDMinor(
3002 a1578d63 Iustin Pop
      [primary_node, primary_node, remote_node, remote_node], instance_name)
3003 ffa1c0dc Iustin Pop
3004 a1f445d3 Iustin Pop
    names = _GenerateUniqueNames(cfg, [".sda_data", ".sda_meta",
3005 a1f445d3 Iustin Pop
                                       ".sdb_data", ".sdb_meta"])
3006 a1f445d3 Iustin Pop
    drbd_sda_dev = _GenerateDRBD8Branch(cfg, primary_node, remote_node,
3007 ffa1c0dc Iustin Pop
                                        disk_sz, names[0:2], "sda",
3008 ffa1c0dc Iustin Pop
                                        minor_pa, minor_sa)
3009 a1f445d3 Iustin Pop
    drbd_sdb_dev = _GenerateDRBD8Branch(cfg, primary_node, remote_node,
3010 ffa1c0dc Iustin Pop
                                        swap_sz, names[2:4], "sdb",
3011 ffa1c0dc Iustin Pop
                                        minor_pb, minor_sb)
3012 a1f445d3 Iustin Pop
    disks = [drbd_sda_dev, drbd_sdb_dev]
3013 0f1a06e3 Manuel Franceschini
  elif template_name == constants.DT_FILE:
3014 0f1a06e3 Manuel Franceschini
    if len(secondary_nodes) != 0:
3015 0f1a06e3 Manuel Franceschini
      raise errors.ProgrammerError("Wrong template configuration")
3016 0f1a06e3 Manuel Franceschini
3017 0f1a06e3 Manuel Franceschini
    file_sda_dev = objects.Disk(dev_type=constants.LD_FILE, size=disk_sz,
3018 0f1a06e3 Manuel Franceschini
                                iv_name="sda", logical_id=(file_driver,
3019 0f1a06e3 Manuel Franceschini
                                "%s/sda" % file_storage_dir))
3020 0f1a06e3 Manuel Franceschini
    file_sdb_dev = objects.Disk(dev_type=constants.LD_FILE, size=swap_sz,
3021 0f1a06e3 Manuel Franceschini
                                iv_name="sdb", logical_id=(file_driver,
3022 0f1a06e3 Manuel Franceschini
                                "%s/sdb" % file_storage_dir))
3023 0f1a06e3 Manuel Franceschini
    disks = [file_sda_dev, file_sdb_dev]
3024 a8083063 Iustin Pop
  else:
3025 a8083063 Iustin Pop
    raise errors.ProgrammerError("Invalid disk template '%s'" % template_name)
3026 a8083063 Iustin Pop
  return disks
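# Summary of the layouts produced above (illustrative): diskless -> no disks;
# plain -> two LVs (sda, sdb) on the primary node only; drbd8 -> two DRBD8
# devices, each backed by a data and a metadata LV and spanning the primary
# and the secondary node; file -> two file-backed disks under file_storage_dir.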
3027 a8083063 Iustin Pop
3028 a8083063 Iustin Pop
3029 a0c3fea1 Michael Hanselmann
def _GetInstanceInfoText(instance):
3030 3ecf6786 Iustin Pop
  """Compute that text that should be added to the disk's metadata.
3031 3ecf6786 Iustin Pop

3032 3ecf6786 Iustin Pop
  """
3033 a0c3fea1 Michael Hanselmann
  return "originstname+%s" % instance.name
3034 a0c3fea1 Michael Hanselmann
3035 a0c3fea1 Michael Hanselmann
3036 a8083063 Iustin Pop
def _CreateDisks(cfg, instance):
3037 a8083063 Iustin Pop
  """Create all disks for an instance.
3038 a8083063 Iustin Pop

3039 a8083063 Iustin Pop
  This abstracts away some work from AddInstance.
3040 a8083063 Iustin Pop

3041 a8083063 Iustin Pop
  Args:
3042 a8083063 Iustin Pop
    cfg: the configuration object
    instance: the instance object
3043 a8083063 Iustin Pop

3044 a8083063 Iustin Pop
  Returns:
3045 a8083063 Iustin Pop
    True or False showing the success of the creation process
3046 a8083063 Iustin Pop

3047 a8083063 Iustin Pop
  """
3048 a0c3fea1 Michael Hanselmann
  info = _GetInstanceInfoText(instance)
3049 a0c3fea1 Michael Hanselmann
3050 0f1a06e3 Manuel Franceschini
  if instance.disk_template == constants.DT_FILE:
3051 0f1a06e3 Manuel Franceschini
    file_storage_dir = os.path.dirname(instance.disks[0].logical_id[1])
3052 0f1a06e3 Manuel Franceschini
    result = rpc.call_file_storage_dir_create(instance.primary_node,
3053 0f1a06e3 Manuel Franceschini
                                              file_storage_dir)
3054 0f1a06e3 Manuel Franceschini
3055 0f1a06e3 Manuel Franceschini
    if not result:
3056 b62ddbe5 Guido Trotter
      logger.Error("Could not connect to node '%s'" % instance.primary_node)
3057 0f1a06e3 Manuel Franceschini
      return False
3058 0f1a06e3 Manuel Franceschini
3059 0f1a06e3 Manuel Franceschini
    if not result[0]:
3060 0f1a06e3 Manuel Franceschini
      logger.Error("failed to create directory '%s'" % file_storage_dir)
3061 0f1a06e3 Manuel Franceschini
      return False
3062 0f1a06e3 Manuel Franceschini
3063 a8083063 Iustin Pop
  for device in instance.disks:
3064 a8083063 Iustin Pop
    logger.Info("creating volume %s for instance %s" %
3065 1c6e3627 Manuel Franceschini
                (device.iv_name, instance.name))
3066 a8083063 Iustin Pop
    #HARDCODE
3067 a8083063 Iustin Pop
    for secondary_node in instance.secondary_nodes:
3068 3f78eef2 Iustin Pop
      if not _CreateBlockDevOnSecondary(cfg, secondary_node, instance,
3069 3f78eef2 Iustin Pop
                                        device, False, info):
3070 a8083063 Iustin Pop
        logger.Error("failed to create volume %s (%s) on secondary node %s!" %
3071 a8083063 Iustin Pop
                     (device.iv_name, device, secondary_node))
3072 a8083063 Iustin Pop
        return False
3073 a8083063 Iustin Pop
    #HARDCODE
3074 3f78eef2 Iustin Pop
    if not _CreateBlockDevOnPrimary(cfg, instance.primary_node,
3075 3f78eef2 Iustin Pop
                                    instance, device, info):
3076 a8083063 Iustin Pop
      logger.Error("failed to create volume %s on primary!" %
3077 a8083063 Iustin Pop
                   device.iv_name)
3078 a8083063 Iustin Pop
      return False
3079 1c6e3627 Manuel Franceschini
3080 a8083063 Iustin Pop
  return True
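# Note (illustrative summary): each volume is created on the secondary nodes
# first and on the primary node last, presumably so that network-mirrored
# devices on the primary already find their peers' backing storage in place.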
3081 a8083063 Iustin Pop
3082 a8083063 Iustin Pop
3083 a8083063 Iustin Pop
def _RemoveDisks(instance, cfg):
3084 a8083063 Iustin Pop
  """Remove all disks for an instance.
3085 a8083063 Iustin Pop

3086 a8083063 Iustin Pop
  This abstracts away some work from `AddInstance()` and
3087 a8083063 Iustin Pop
  `RemoveInstance()`. Note that in case some of the devices couldn't
3088 1d67656e Iustin Pop
  be removed, the removal will continue with the other ones (compare
3089 a8083063 Iustin Pop
  with `_CreateDisks()`).
3090 a8083063 Iustin Pop

3091 a8083063 Iustin Pop
  Args:
3092 a8083063 Iustin Pop
    instance: the instance object
    cfg: the configuration object
3093 a8083063 Iustin Pop

3094 a8083063 Iustin Pop
  Returns:
3095 a8083063 Iustin Pop
    True or False showing the success of the removal process
3096 a8083063 Iustin Pop

3097 a8083063 Iustin Pop
  """
3098 a8083063 Iustin Pop
  logger.Info("removing block devices for instance %s" % instance.name)
3099 a8083063 Iustin Pop
3100 a8083063 Iustin Pop
  result = True
3101 a8083063 Iustin Pop
  for device in instance.disks:
3102 a8083063 Iustin Pop
    for node, disk in device.ComputeNodeTree(instance.primary_node):
3103 a8083063 Iustin Pop
      cfg.SetDiskID(disk, node)
3104 a8083063 Iustin Pop
      if not rpc.call_blockdev_remove(node, disk):
3105 a8083063 Iustin Pop
        logger.Error("could not remove block device %s on node %s,"
3106 a8083063 Iustin Pop
                     " continuing anyway" %
3107 a8083063 Iustin Pop
                     (device.iv_name, node))
3108 a8083063 Iustin Pop
        result = False
3109 0f1a06e3 Manuel Franceschini
3110 0f1a06e3 Manuel Franceschini
  if instance.disk_template == constants.DT_FILE:
3111 0f1a06e3 Manuel Franceschini
    file_storage_dir = os.path.dirname(instance.disks[0].logical_id[1])
3112 0f1a06e3 Manuel Franceschini
    if not rpc.call_file_storage_dir_remove(instance.primary_node,
3113 0f1a06e3 Manuel Franceschini
                                            file_storage_dir):
3114 0f1a06e3 Manuel Franceschini
      logger.Error("could not remove directory '%s'" % file_storage_dir)
3115 0f1a06e3 Manuel Franceschini
      result = False
3116 0f1a06e3 Manuel Franceschini
3117 a8083063 Iustin Pop
  return result
3118 a8083063 Iustin Pop
3119 a8083063 Iustin Pop
3120 e2fe6369 Iustin Pop
def _ComputeDiskSize(disk_template, disk_size, swap_size):
3121 e2fe6369 Iustin Pop
  """Compute disk size requirements in the volume group
3122 e2fe6369 Iustin Pop

3123 e2fe6369 Iustin Pop
  This is currently hard-coded for the two-drive layout.
3124 e2fe6369 Iustin Pop

3125 e2fe6369 Iustin Pop
  """
3126 e2fe6369 Iustin Pop
  # Required free disk space as a function of disk and swap space
3127 e2fe6369 Iustin Pop
  req_size_dict = {
3128 e2fe6369 Iustin Pop
    constants.DT_DISKLESS: None,
3129 e2fe6369 Iustin Pop
    constants.DT_PLAIN: disk_size + swap_size,
3130 e2fe6369 Iustin Pop
    # 256 MB are added for drbd metadata, 128MB for each drbd device
3131 e2fe6369 Iustin Pop
    constants.DT_DRBD8: disk_size + swap_size + 256,
3132 e2fe6369 Iustin Pop
    constants.DT_FILE: None,
3133 e2fe6369 Iustin Pop
  }
3134 e2fe6369 Iustin Pop
3135 e2fe6369 Iustin Pop
  if disk_template not in req_size_dict:
3136 e2fe6369 Iustin Pop
    raise errors.ProgrammerError("Disk template '%s' size requirement"
3137 e2fe6369 Iustin Pop
                                 " is unknown" %  disk_template)
3138 e2fe6369 Iustin Pop
3139 e2fe6369 Iustin Pop
  return req_size_dict[disk_template]
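# Worked example (illustrative): a 10240 MB data disk plus a 4096 MB swap disk
# on the drbd8 template needs 10240 + 4096 + 256 = 14592 MB of free space in
# the volume group, i.e. _ComputeDiskSize(constants.DT_DRBD8, 10240, 4096)
# returns 14592; the diskless and file templates return None (no VG space).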
3140 e2fe6369 Iustin Pop
3141 e2fe6369 Iustin Pop
3142 a8083063 Iustin Pop
class LUCreateInstance(LogicalUnit):
3143 a8083063 Iustin Pop
  """Create an instance.
3144 a8083063 Iustin Pop

3145 a8083063 Iustin Pop
  """
3146 a8083063 Iustin Pop
  HPATH = "instance-add"
3147 a8083063 Iustin Pop
  HTYPE = constants.HTYPE_INSTANCE
3148 538475ca Iustin Pop
  _OP_REQP = ["instance_name", "mem_size", "disk_size",
3149 a8083063 Iustin Pop
              "disk_template", "swap_size", "mode", "start", "vcpus",
3150 1862d460 Alexander Schreiber
              "wait_for_sync", "ip_check", "mac"]
3151 7baf741d Guido Trotter
  REQ_BGL = False
3152 7baf741d Guido Trotter
3153 7baf741d Guido Trotter
  def _ExpandNode(self, node):
3154 7baf741d Guido Trotter
    """Expands and checks one node name.
3155 7baf741d Guido Trotter

3156 7baf741d Guido Trotter
    """
3157 7baf741d Guido Trotter
    node_full = self.cfg.ExpandNodeName(node)
3158 7baf741d Guido Trotter
    if node_full is None:
3159 7baf741d Guido Trotter
      raise errors.OpPrereqError("Unknown node %s" % node)
3160 7baf741d Guido Trotter
    return node_full
3161 7baf741d Guido Trotter
3162 7baf741d Guido Trotter
  def ExpandNames(self):
3163 7baf741d Guido Trotter
    """ExpandNames for CreateInstance.
3164 7baf741d Guido Trotter

3165 7baf741d Guido Trotter
    Figure out the right locks for instance creation.
3166 7baf741d Guido Trotter

3167 7baf741d Guido Trotter
    """
3168 7baf741d Guido Trotter
    self.needed_locks = {}
3169 7baf741d Guido Trotter
3170 7baf741d Guido Trotter
    # set optional parameters to none if they don't exist
3171 7baf741d Guido Trotter
    for attr in ["kernel_path", "initrd_path", "pnode", "snode",
3172 7baf741d Guido Trotter
                 "iallocator", "hvm_boot_order", "hvm_acpi", "hvm_pae",
3173 7baf741d Guido Trotter
                 "hvm_cdrom_image_path", "hvm_nic_type", "hvm_disk_type",
3174 e69d05fd Iustin Pop
                 "vnc_bind_address", "hypervisor"]:
3175 7baf741d Guido Trotter
      if not hasattr(self.op, attr):
3176 7baf741d Guido Trotter
        setattr(self.op, attr, None)
3177 7baf741d Guido Trotter
3178 4b2f38dd Iustin Pop
    # cheap checks, mostly valid constants given
3179 4b2f38dd Iustin Pop
3180 7baf741d Guido Trotter
    # verify creation mode
3181 7baf741d Guido Trotter
    if self.op.mode not in (constants.INSTANCE_CREATE,
3182 7baf741d Guido Trotter
                            constants.INSTANCE_IMPORT):
3183 7baf741d Guido Trotter
      raise errors.OpPrereqError("Invalid instance creation mode '%s'" %
3184 7baf741d Guido Trotter
                                 self.op.mode)
3185 4b2f38dd Iustin Pop
3186 7baf741d Guido Trotter
    # disk template and mirror node verification
3187 7baf741d Guido Trotter
    if self.op.disk_template not in constants.DISK_TEMPLATES:
3188 7baf741d Guido Trotter
      raise errors.OpPrereqError("Invalid disk template name")
3189 7baf741d Guido Trotter
3190 4b2f38dd Iustin Pop
    if self.op.hypervisor is None:
3191 4b2f38dd Iustin Pop
      self.op.hypervisor = self.cfg.GetHypervisorType()
3192 4b2f38dd Iustin Pop
3193 4b2f38dd Iustin Pop
    enabled_hvs = self.cfg.GetClusterInfo().enabled_hypervisors
3194 4b2f38dd Iustin Pop
    if self.op.hypervisor not in enabled_hvs:
3195 4b2f38dd Iustin Pop
      raise errors.OpPrereqError("Selected hypervisor (%s) not enabled in the"
3196 4b2f38dd Iustin Pop
                                 " cluster (%s)" % (self.op.hypervisor,
3197 4b2f38dd Iustin Pop
                                  ",".join(enabled_hvs)))
3198 4b2f38dd Iustin Pop
3199 7baf741d Guido Trotter
    #### instance parameters check
3200 7baf741d Guido Trotter
3201 7baf741d Guido Trotter
    # instance name verification
3202 7baf741d Guido Trotter
    hostname1 = utils.HostInfo(self.op.instance_name)
3203 7baf741d Guido Trotter
    self.op.instance_name = instance_name = hostname1.name
3204 7baf741d Guido Trotter
3205 7baf741d Guido Trotter
    # this is just a preventive check, but someone might still add this
3206 7baf741d Guido Trotter
    # instance in the meantime, and creation will fail at lock-add time
3207 7baf741d Guido Trotter
    if instance_name in self.cfg.GetInstanceList():
3208 7baf741d Guido Trotter
      raise errors.OpPrereqError("Instance '%s' is already in the cluster" %
3209 7baf741d Guido Trotter
                                 instance_name)
3210 7baf741d Guido Trotter
3211 7baf741d Guido Trotter
    self.add_locks[locking.LEVEL_INSTANCE] = instance_name
3212 7baf741d Guido Trotter
3213 7baf741d Guido Trotter
    # ip validity checks
3214 7baf741d Guido Trotter
    ip = getattr(self.op, "ip", None)
3215 7baf741d Guido Trotter
    if ip is None or ip.lower() == "none":
3216 7baf741d Guido Trotter
      inst_ip = None
3217 7baf741d Guido Trotter
    elif ip.lower() == "auto":
3218 7baf741d Guido Trotter
      inst_ip = hostname1.ip
3219 7baf741d Guido Trotter
    else:
3220 7baf741d Guido Trotter
      if not utils.IsValidIP(ip):
3221 7baf741d Guido Trotter
        raise errors.OpPrereqError("given IP address '%s' doesn't look"
3222 7baf741d Guido Trotter
                                   " like a valid IP" % ip)
3223 7baf741d Guido Trotter
      inst_ip = ip
3224 7baf741d Guido Trotter
    self.inst_ip = self.op.ip = inst_ip
3225 7baf741d Guido Trotter
    # used in CheckPrereq for ip ping check
3226 7baf741d Guido Trotter
    self.check_ip = hostname1.ip
3227 7baf741d Guido Trotter
3228 7baf741d Guido Trotter
    # MAC address verification
3229 7baf741d Guido Trotter
    if self.op.mac != "auto":
3230 7baf741d Guido Trotter
      if not utils.IsValidMac(self.op.mac.lower()):
3231 7baf741d Guido Trotter
        raise errors.OpPrereqError("invalid MAC address specified: %s" %
3232 7baf741d Guido Trotter
                                   self.op.mac)
3233 7baf741d Guido Trotter
3234 7baf741d Guido Trotter
    # boot order verification
3235 7baf741d Guido Trotter
    if self.op.hvm_boot_order is not None:
3236 7baf741d Guido Trotter
      if len(self.op.hvm_boot_order.strip("acdn")) != 0:
3237 7baf741d Guido Trotter
        raise errors.OpPrereqError("invalid boot order specified,"
3238 7baf741d Guido Trotter
                                   " must be one or more of [acdn]")
3239 7baf741d Guido Trotter
    # file storage checks
3240 7baf741d Guido Trotter
    if (self.op.file_driver and
3241 7baf741d Guido Trotter
        not self.op.file_driver in constants.FILE_DRIVER):
3242 7baf741d Guido Trotter
      raise errors.OpPrereqError("Invalid file driver name '%s'" %
3243 7baf741d Guido Trotter
                                 self.op.file_driver)
3244 7baf741d Guido Trotter
3245 7baf741d Guido Trotter
    if self.op.file_storage_dir and os.path.isabs(self.op.file_storage_dir):
3246 7baf741d Guido Trotter
      raise errors.OpPrereqError("File storage directory path not absolute")
3247 7baf741d Guido Trotter
3248 7baf741d Guido Trotter
    ### Node/iallocator related checks
3249 7baf741d Guido Trotter
    if [self.op.iallocator, self.op.pnode].count(None) != 1:
3250 7baf741d Guido Trotter
      raise errors.OpPrereqError("One and only one of iallocator and primary"
3251 7baf741d Guido Trotter
                                 " node must be given")
3252 7baf741d Guido Trotter
3253 7baf741d Guido Trotter
    if self.op.iallocator:
3254 7baf741d Guido Trotter
      self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
3255 7baf741d Guido Trotter
    else:
3256 7baf741d Guido Trotter
      self.op.pnode = self._ExpandNode(self.op.pnode)
3257 7baf741d Guido Trotter
      nodelist = [self.op.pnode]
3258 7baf741d Guido Trotter
      if self.op.snode is not None:
3259 7baf741d Guido Trotter
        self.op.snode = self._ExpandNode(self.op.snode)
3260 7baf741d Guido Trotter
        nodelist.append(self.op.snode)
3261 7baf741d Guido Trotter
      self.needed_locks[locking.LEVEL_NODE] = nodelist
3262 7baf741d Guido Trotter
3263 7baf741d Guido Trotter
    # in case of import lock the source node too
3264 7baf741d Guido Trotter
    if self.op.mode == constants.INSTANCE_IMPORT:
3265 7baf741d Guido Trotter
      src_node = getattr(self.op, "src_node", None)
3266 7baf741d Guido Trotter
      src_path = getattr(self.op, "src_path", None)
3267 7baf741d Guido Trotter
3268 7baf741d Guido Trotter
      if src_node is None or src_path is None:
3269 7baf741d Guido Trotter
        raise errors.OpPrereqError("Importing an instance requires source"
3270 7baf741d Guido Trotter
                                   " node and path options")
3271 7baf741d Guido Trotter
3272 7baf741d Guido Trotter
      if not os.path.isabs(src_path):
3273 7baf741d Guido Trotter
        raise errors.OpPrereqError("The source path must be absolute")
3274 7baf741d Guido Trotter
3275 7baf741d Guido Trotter
      self.op.src_node = src_node = self._ExpandNode(src_node)
3276 7baf741d Guido Trotter
      if self.needed_locks[locking.LEVEL_NODE] is not locking.ALL_SET:
3277 7baf741d Guido Trotter
        self.needed_locks[locking.LEVEL_NODE].append(src_node)
3278 7baf741d Guido Trotter
3279 7baf741d Guido Trotter
    else: # INSTANCE_CREATE
3280 7baf741d Guido Trotter
      if getattr(self.op, "os_type", None) is None:
3281 7baf741d Guido Trotter
        raise errors.OpPrereqError("No guest OS specified")
3282 a8083063 Iustin Pop
3283 538475ca Iustin Pop
  def _RunAllocator(self):
3284 538475ca Iustin Pop
    """Run the allocator based on input opcode.
3285 538475ca Iustin Pop

3286 538475ca Iustin Pop
    """
3287 538475ca Iustin Pop
    disks = [{"size": self.op.disk_size, "mode": "w"},
3288 538475ca Iustin Pop
             {"size": self.op.swap_size, "mode": "w"}]
3289 538475ca Iustin Pop
    nics = [{"mac": self.op.mac, "ip": getattr(self.op, "ip", None),
3290 538475ca Iustin Pop
             "bridge": self.op.bridge}]
3291 d6a02168 Michael Hanselmann
    ial = IAllocator(self.cfg,
3292 29859cb7 Iustin Pop
                     mode=constants.IALLOCATOR_MODE_ALLOC,
3293 d1c2dd75 Iustin Pop
                     name=self.op.instance_name,
3294 d1c2dd75 Iustin Pop
                     disk_template=self.op.disk_template,
3295 d1c2dd75 Iustin Pop
                     tags=[],
3296 d1c2dd75 Iustin Pop
                     os=self.op.os_type,
3297 d1c2dd75 Iustin Pop
                     vcpus=self.op.vcpus,
3298 d1c2dd75 Iustin Pop
                     mem_size=self.op.mem_size,
3299 d1c2dd75 Iustin Pop
                     disks=disks,
3300 d1c2dd75 Iustin Pop
                     nics=nics,
3301 29859cb7 Iustin Pop
                     )
3302 d1c2dd75 Iustin Pop
3303 d1c2dd75 Iustin Pop
    ial.Run(self.op.iallocator)
3304 d1c2dd75 Iustin Pop
3305 d1c2dd75 Iustin Pop
    if not ial.success:
3306 538475ca Iustin Pop
      raise errors.OpPrereqError("Can't compute nodes using"
3307 538475ca Iustin Pop
                                 " iallocator '%s': %s" % (self.op.iallocator,
3308 d1c2dd75 Iustin Pop
                                                           ial.info))
3309 27579978 Iustin Pop
    if len(ial.nodes) != ial.required_nodes:
3310 538475ca Iustin Pop
      raise errors.OpPrereqError("iallocator '%s' returned invalid number"
3311 538475ca Iustin Pop
                                 " of nodes (%s), required %s" %
3312 97abc79f Iustin Pop
                                 (self.op.iallocator, len(ial.nodes),
3313 1ce4bbe3 Renรฉ Nussbaumer
                                  ial.required_nodes))
3314 d1c2dd75 Iustin Pop
    self.op.pnode = ial.nodes[0]
3315 538475ca Iustin Pop
    logger.ToStdout("Selected nodes for the instance: %s" %
3316 d1c2dd75 Iustin Pop
                    (", ".join(ial.nodes),))
3317 538475ca Iustin Pop
    logger.Info("Selected nodes for instance %s via iallocator %s: %s" %
3318 d1c2dd75 Iustin Pop
                (self.op.instance_name, self.op.iallocator, ial.nodes))
3319 27579978 Iustin Pop
    if ial.required_nodes == 2:
3320 d1c2dd75 Iustin Pop
      self.op.snode = ial.nodes[1]
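    # Illustrative example of the allocator request built above (the concrete
    # values are made up):
    #   disks = [{"size": 10240, "mode": "w"}, {"size": 4096, "mode": "w"}]
    #   nics  = [{"mac": "auto", "ip": None, "bridge": "xen-br0"}]
    # When the allocator reports that two nodes are required, the first one
    # becomes the primary and the second one the secondary node.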
3321 538475ca Iustin Pop
3322 a8083063 Iustin Pop
  def BuildHooksEnv(self):
3323 a8083063 Iustin Pop
    """Build hooks env.
3324 a8083063 Iustin Pop

3325 a8083063 Iustin Pop
    This runs on master, primary and secondary nodes of the instance.
3326 a8083063 Iustin Pop

3327 a8083063 Iustin Pop
    """
3328 a8083063 Iustin Pop
    env = {
3329 396e1b78 Michael Hanselmann
      "INSTANCE_DISK_TEMPLATE": self.op.disk_template,
3330 396e1b78 Michael Hanselmann
      "INSTANCE_DISK_SIZE": self.op.disk_size,
3331 396e1b78 Michael Hanselmann
      "INSTANCE_SWAP_SIZE": self.op.swap_size,
3332 a8083063 Iustin Pop
      "INSTANCE_ADD_MODE": self.op.mode,
3333 a8083063 Iustin Pop
      }
3334 a8083063 Iustin Pop
    if self.op.mode == constants.INSTANCE_IMPORT:
3335 396e1b78 Michael Hanselmann
      env["INSTANCE_SRC_NODE"] = self.op.src_node
3336 396e1b78 Michael Hanselmann
      env["INSTANCE_SRC_PATH"] = self.op.src_path
3337 396e1b78 Michael Hanselmann
      env["INSTANCE_SRC_IMAGE"] = self.src_image
3338 396e1b78 Michael Hanselmann
3339 396e1b78 Michael Hanselmann
    env.update(_BuildInstanceHookEnv(name=self.op.instance_name,
3340 396e1b78 Michael Hanselmann
      primary_node=self.op.pnode,
3341 396e1b78 Michael Hanselmann
      secondary_nodes=self.secondaries,
3342 396e1b78 Michael Hanselmann
      status=self.instance_status,
3343 ecb215b5 Michael Hanselmann
      os_type=self.op.os_type,
3344 396e1b78 Michael Hanselmann
      memory=self.op.mem_size,
3345 396e1b78 Michael Hanselmann
      vcpus=self.op.vcpus,
3346 c7b27e9e Iustin Pop
      nics=[(self.inst_ip, self.op.bridge, self.op.mac)],
3347 396e1b78 Michael Hanselmann
    ))
3348 a8083063 Iustin Pop
3349 d6a02168 Michael Hanselmann
    nl = ([self.cfg.GetMasterNode(), self.op.pnode] +
3350 a8083063 Iustin Pop
          self.secondaries)
3351 a8083063 Iustin Pop
    return env, nl, nl
3352 a8083063 Iustin Pop
3353 a8083063 Iustin Pop
3354 a8083063 Iustin Pop
  def CheckPrereq(self):
3355 a8083063 Iustin Pop
    """Check prerequisites.
3356 a8083063 Iustin Pop

3357 a8083063 Iustin Pop
    """
3358 eedc99de Manuel Franceschini
    if (not self.cfg.GetVGName() and
3359 eedc99de Manuel Franceschini
        self.op.disk_template not in constants.DTS_NOT_LVM):
3360 eedc99de Manuel Franceschini
      raise errors.OpPrereqError("Cluster does not support lvm-based"
3361 eedc99de Manuel Franceschini
                                 " instances")
3362 eedc99de Manuel Franceschini
3363 e69d05fd Iustin Pop
3364 a8083063 Iustin Pop
    if self.op.mode == constants.INSTANCE_IMPORT:
3365 7baf741d Guido Trotter
      src_node = self.op.src_node
3366 7baf741d Guido Trotter
      src_path = self.op.src_path
3367 a8083063 Iustin Pop
3368 a8083063 Iustin Pop
      export_info = rpc.call_export_info(src_node, src_path)
3369 a8083063 Iustin Pop
3370 a8083063 Iustin Pop
      if not export_info:
3371 3ecf6786 Iustin Pop
        raise errors.OpPrereqError("No export found in dir %s" % src_path)
3372 a8083063 Iustin Pop
3373 a8083063 Iustin Pop
      if not export_info.has_section(constants.INISECT_EXP):
3374 3ecf6786 Iustin Pop
        raise errors.ProgrammerError("Corrupted export config")
3375 a8083063 Iustin Pop
3376 a8083063 Iustin Pop
      ei_version = export_info.get(constants.INISECT_EXP, 'version')
3377 a8083063 Iustin Pop
      if (int(ei_version) != constants.EXPORT_VERSION):
3378 3ecf6786 Iustin Pop
        raise errors.OpPrereqError("Wrong export version %s (wanted %d)" %
3379 3ecf6786 Iustin Pop
                                   (ei_version, constants.EXPORT_VERSION))
3380 a8083063 Iustin Pop
3381 a8083063 Iustin Pop
      if int(export_info.get(constants.INISECT_INS, 'disk_count')) > 1:
3382 3ecf6786 Iustin Pop
        raise errors.OpPrereqError("Can't import instance with more than"
3383 3ecf6786 Iustin Pop
                                   " one data disk")
3384 a8083063 Iustin Pop
3385 a8083063 Iustin Pop
      # FIXME: are the old os-es, disk sizes, etc. useful?
3386 a8083063 Iustin Pop
      self.op.os_type = export_info.get(constants.INISECT_EXP, 'os')
3387 a8083063 Iustin Pop
      diskimage = os.path.join(src_path, export_info.get(constants.INISECT_INS,
3388 a8083063 Iustin Pop
                                                         'disk0_dump'))
3389 a8083063 Iustin Pop
      self.src_image = diskimage
3390 901a65c1 Iustin Pop
3391 7baf741d Guido Trotter
    # ip ping checks (we use the same ip that was resolved in ExpandNames)
3392 901a65c1 Iustin Pop
3393 901a65c1 Iustin Pop
    if self.op.start and not self.op.ip_check:
3394 901a65c1 Iustin Pop
      raise errors.OpPrereqError("Cannot ignore IP address conflicts when"
3395 901a65c1 Iustin Pop
                                 " adding an instance in start mode")
3396 901a65c1 Iustin Pop
3397 901a65c1 Iustin Pop
    if self.op.ip_check:
3398 7baf741d Guido Trotter
      if utils.TcpPing(self.check_ip, constants.DEFAULT_NODED_PORT):
3399 901a65c1 Iustin Pop
        raise errors.OpPrereqError("IP %s of instance %s already in use" %
3400 7b3a8fb5 Iustin Pop
                                   (self.check_ip, self.op.instance_name))
3401 901a65c1 Iustin Pop
3402 901a65c1 Iustin Pop
    # bridge verification
3403 901a65c1 Iustin Pop
    bridge = getattr(self.op, "bridge", None)
3404 901a65c1 Iustin Pop
    if bridge is None:
3405 901a65c1 Iustin Pop
      self.op.bridge = self.cfg.GetDefBridge()
3406 901a65c1 Iustin Pop
    else:
3407 901a65c1 Iustin Pop
      self.op.bridge = bridge
3408 901a65c1 Iustin Pop
3409 538475ca Iustin Pop
    #### allocator run
3410 538475ca Iustin Pop
3411 538475ca Iustin Pop
    if self.op.iallocator is not None:
3412 538475ca Iustin Pop
      self._RunAllocator()
3413 0f1a06e3 Manuel Franceschini
3414 901a65c1 Iustin Pop
    #### node related checks
3415 901a65c1 Iustin Pop
3416 901a65c1 Iustin Pop
    # check primary node
3417 7baf741d Guido Trotter
    self.pnode = pnode = self.cfg.GetNodeInfo(self.op.pnode)
3418 7baf741d Guido Trotter
    assert self.pnode is not None, \
3419 7baf741d Guido Trotter
      "Cannot retrieve locked node %s" % self.op.pnode
3420 901a65c1 Iustin Pop
    self.secondaries = []
3421 901a65c1 Iustin Pop
3422 901a65c1 Iustin Pop
    # mirror node verification
3423 a1f445d3 Iustin Pop
    if self.op.disk_template in constants.DTS_NET_MIRROR:
3424 7baf741d Guido Trotter
      if self.op.snode is None:
3425 a1f445d3 Iustin Pop
        raise errors.OpPrereqError("The networked disk templates need"
3426 3ecf6786 Iustin Pop
                                   " a mirror node")
3427 7baf741d Guido Trotter
      if self.op.snode == pnode.name:
3428 3ecf6786 Iustin Pop
        raise errors.OpPrereqError("The secondary node cannot be"
3429 3ecf6786 Iustin Pop
                                   " the primary node.")
3430 7baf741d Guido Trotter
      self.secondaries.append(self.op.snode)
3431 a8083063 Iustin Pop
3432 e2fe6369 Iustin Pop
    req_size = _ComputeDiskSize(self.op.disk_template,
3433 e2fe6369 Iustin Pop
                                self.op.disk_size, self.op.swap_size)
3434 ed1ebc60 Guido Trotter
3435 8d75db10 Iustin Pop
    # Check lv size requirements
3436 8d75db10 Iustin Pop
    if req_size is not None:
3437 8d75db10 Iustin Pop
      nodenames = [pnode.name] + self.secondaries
3438 e69d05fd Iustin Pop
      nodeinfo = rpc.call_node_info(nodenames, self.cfg.GetVGName(),
3439 e69d05fd Iustin Pop
                                    self.op.hypervisor)
3440 8d75db10 Iustin Pop
      for node in nodenames:
3441 8d75db10 Iustin Pop
        info = nodeinfo.get(node, None)
3442 8d75db10 Iustin Pop
        if not info:
3443 8d75db10 Iustin Pop
          raise errors.OpPrereqError("Cannot get current information"
3444 3e91897b Iustin Pop
                                     " from node '%s'" % node)
3445 8d75db10 Iustin Pop
        vg_free = info.get('vg_free', None)
3446 8d75db10 Iustin Pop
        if not isinstance(vg_free, int):
3447 8d75db10 Iustin Pop
          raise errors.OpPrereqError("Can't compute free disk space on"
3448 8d75db10 Iustin Pop
                                     " node %s" % node)
3449 8d75db10 Iustin Pop
        if req_size > vg_free:
3450 8d75db10 Iustin Pop
          raise errors.OpPrereqError("Not enough disk space on target node %s."
3451 8d75db10 Iustin Pop
                                     " %d MB available, %d MB required" %
3452 8d75db10 Iustin Pop
                                     (node, vg_free, req_size))
3453 ed1ebc60 Guido Trotter
3454 a8083063 Iustin Pop
    # os verification
3455 00fe9e38 Guido Trotter
    os_obj = rpc.call_os_get(pnode.name, self.op.os_type)
3456 dfa96ded Guido Trotter
    if not os_obj:
3457 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("OS '%s' not in supported os list for"
3458 3ecf6786 Iustin Pop
                                 " primary node"  % self.op.os_type)
3459 a8083063 Iustin Pop
3460 3b6d8c9b Iustin Pop
    if self.op.kernel_path == constants.VALUE_NONE:
3461 3b6d8c9b Iustin Pop
      raise errors.OpPrereqError("Can't set instance kernel to none")
3462 3b6d8c9b Iustin Pop
3463 901a65c1 Iustin Pop
    # bridge check on primary node
3464 a8083063 Iustin Pop
    if not rpc.call_bridges_exist(self.pnode.name, [self.op.bridge]):
3465 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("target bridge '%s' does not exist on"
3466 3ecf6786 Iustin Pop
                                 " destination node '%s'" %
3467 3ecf6786 Iustin Pop
                                 (self.op.bridge, pnode.name))
3468 a8083063 Iustin Pop
3469 49ce1563 Iustin Pop
    # memory check on primary node
3470 49ce1563 Iustin Pop
    if self.op.start:
3471 49ce1563 Iustin Pop
      _CheckNodeFreeMemory(self.cfg, self.pnode.name,
3472 49ce1563 Iustin Pop
                           "creating instance %s" % self.op.instance_name,
3473 e69d05fd Iustin Pop
                           self.op.mem_size, self.op.hypervisor)
3474 49ce1563 Iustin Pop
3475 31a853d2 Iustin Pop
    # hvm_cdrom_image_path verification
3476 31a853d2 Iustin Pop
    if self.op.hvm_cdrom_image_path is not None:
3477 7baf741d Guido Trotter
      # FIXME (als): shouldn't these checks happen on the destination node?
3478 31a853d2 Iustin Pop
      if not os.path.isabs(self.op.hvm_cdrom_image_path):
3479 31a853d2 Iustin Pop
        raise errors.OpPrereqError("The path to the HVM CDROM image must"
3480 31a853d2 Iustin Pop
                                   " be an absolute path or None, not %s" %
3481 31a853d2 Iustin Pop
                                   self.op.hvm_cdrom_image_path)
3482 31a853d2 Iustin Pop
      if not os.path.isfile(self.op.hvm_cdrom_image_path):
3483 31a853d2 Iustin Pop
        raise errors.OpPrereqError("The HVM CDROM image must either be a"
3484 31a853d2 Iustin Pop
                                   " regular file or a symlink pointing to"
3485 31a853d2 Iustin Pop
                                   " an existing regular file, not %s" %
3486 31a853d2 Iustin Pop
                                   self.op.hvm_cdrom_image_path)
3487 31a853d2 Iustin Pop
3488 31a853d2 Iustin Pop
    # vnc_bind_address verification
3489 31a853d2 Iustin Pop
    if self.op.vnc_bind_address is not None:
3490 31a853d2 Iustin Pop
      if not utils.IsValidIP(self.op.vnc_bind_address):
3491 31a853d2 Iustin Pop
        raise errors.OpPrereqError("given VNC bind address '%s' doesn't look"
3492 31a853d2 Iustin Pop
                                   " like a valid IP address" %
3493 31a853d2 Iustin Pop
                                   self.op.vnc_bind_address)
3494 31a853d2 Iustin Pop
3495 5397e0b7 Alexander Schreiber
    # Xen HVM device type checks
3496 00cd937c Iustin Pop
    if self.op.hypervisor == constants.HT_XEN_HVM:
3497 5397e0b7 Alexander Schreiber
      if self.op.hvm_nic_type not in constants.HT_HVM_VALID_NIC_TYPES:
3498 5397e0b7 Alexander Schreiber
        raise errors.OpPrereqError("Invalid NIC type %s specified for Xen HVM"
3499 5397e0b7 Alexander Schreiber
                                   " hypervisor" % self.op.hvm_nic_type)
3500 5397e0b7 Alexander Schreiber
      if self.op.hvm_disk_type not in constants.HT_HVM_VALID_DISK_TYPES:
3501 5397e0b7 Alexander Schreiber
        raise errors.OpPrereqError("Invalid disk type %s specified for Xen HVM"
3502 5397e0b7 Alexander Schreiber
                                   " hypervisor" % self.op.hvm_disk_type)
3503 5397e0b7 Alexander Schreiber
3504 a8083063 Iustin Pop
    if self.op.start:
3505 a8083063 Iustin Pop
      self.instance_status = 'up'
3506 a8083063 Iustin Pop
    else:
3507 a8083063 Iustin Pop
      self.instance_status = 'down'
3508 a8083063 Iustin Pop
3509 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
3510 a8083063 Iustin Pop
    """Create and add the instance to the cluster.
3511 a8083063 Iustin Pop

3512 a8083063 Iustin Pop
    """
3513 a8083063 Iustin Pop
    instance = self.op.instance_name
3514 a8083063 Iustin Pop
    pnode_name = self.pnode.name
3515 a8083063 Iustin Pop
3516 1862d460 Alexander Schreiber
    if self.op.mac == "auto":
3517 ba4b62cf Iustin Pop
      mac_address = self.cfg.GenerateMAC()
3518 1862d460 Alexander Schreiber
    else:
3519 ba4b62cf Iustin Pop
      mac_address = self.op.mac
3520 1862d460 Alexander Schreiber
3521 1862d460 Alexander Schreiber
    nic = objects.NIC(bridge=self.op.bridge, mac=mac_address)
3522 a8083063 Iustin Pop
    if self.inst_ip is not None:
3523 a8083063 Iustin Pop
      nic.ip = self.inst_ip
3524 a8083063 Iustin Pop
3525 e69d05fd Iustin Pop
    ht_kind = self.op.hypervisor
3526 2a6469d5 Alexander Schreiber
    if ht_kind in constants.HTS_REQ_PORT:
3527 2a6469d5 Alexander Schreiber
      network_port = self.cfg.AllocatePort()
3528 2a6469d5 Alexander Schreiber
    else:
3529 2a6469d5 Alexander Schreiber
      network_port = None
3530 58acb49d Alexander Schreiber
3531 31a853d2 Iustin Pop
    if self.op.vnc_bind_address is None:
3532 31a853d2 Iustin Pop
      self.op.vnc_bind_address = constants.VNC_DEFAULT_BIND_ADDRESS
3533 31a853d2 Iustin Pop
3534 2c313123 Manuel Franceschini
    # this is needed because os.path.join does not accept None arguments
3535 2c313123 Manuel Franceschini
    if self.op.file_storage_dir is None:
3536 2c313123 Manuel Franceschini
      string_file_storage_dir = ""
3537 2c313123 Manuel Franceschini
    else:
3538 2c313123 Manuel Franceschini
      string_file_storage_dir = self.op.file_storage_dir
3539 2c313123 Manuel Franceschini
3540 0f1a06e3 Manuel Franceschini
    # build the full file storage dir path
3541 0f1a06e3 Manuel Franceschini
    file_storage_dir = os.path.normpath(os.path.join(
3542 d6a02168 Michael Hanselmann
                                        self.cfg.GetFileStorageDir(),
3543 2c313123 Manuel Franceschini
                                        string_file_storage_dir, instance))
3544 0f1a06e3 Manuel Franceschini
3545 0f1a06e3 Manuel Franceschini
3546 923b1523 Iustin Pop
    disks = _GenerateDiskTemplate(self.cfg,
3547 a8083063 Iustin Pop
                                  self.op.disk_template,
3548 a8083063 Iustin Pop
                                  instance, pnode_name,
3549 a8083063 Iustin Pop
                                  self.secondaries, self.op.disk_size,
3550 0f1a06e3 Manuel Franceschini
                                  self.op.swap_size,
3551 0f1a06e3 Manuel Franceschini
                                  file_storage_dir,
3552 0f1a06e3 Manuel Franceschini
                                  self.op.file_driver)
3553 a8083063 Iustin Pop
3554 a8083063 Iustin Pop
    iobj = objects.Instance(name=instance, os=self.op.os_type,
3555 a8083063 Iustin Pop
                            primary_node=pnode_name,
3556 a8083063 Iustin Pop
                            memory=self.op.mem_size,
3557 a8083063 Iustin Pop
                            vcpus=self.op.vcpus,
3558 a8083063 Iustin Pop
                            nics=[nic], disks=disks,
3559 a8083063 Iustin Pop
                            disk_template=self.op.disk_template,
3560 a8083063 Iustin Pop
                            status=self.instance_status,
3561 58acb49d Alexander Schreiber
                            network_port=network_port,
3562 3b6d8c9b Iustin Pop
                            kernel_path=self.op.kernel_path,
3563 3b6d8c9b Iustin Pop
                            initrd_path=self.op.initrd_path,
3564 25c5878d Alexander Schreiber
                            hvm_boot_order=self.op.hvm_boot_order,
3565 31a853d2 Iustin Pop
                            hvm_acpi=self.op.hvm_acpi,
3566 31a853d2 Iustin Pop
                            hvm_pae=self.op.hvm_pae,
3567 31a853d2 Iustin Pop
                            hvm_cdrom_image_path=self.op.hvm_cdrom_image_path,
3568 31a853d2 Iustin Pop
                            vnc_bind_address=self.op.vnc_bind_address,
3569 5397e0b7 Alexander Schreiber
                            hvm_nic_type=self.op.hvm_nic_type,
3570 5397e0b7 Alexander Schreiber
                            hvm_disk_type=self.op.hvm_disk_type,
3571 e69d05fd Iustin Pop
                            hypervisor=self.op.hypervisor,
3572 a8083063 Iustin Pop
                            )
3573 a8083063 Iustin Pop
3574 a8083063 Iustin Pop
    feedback_fn("* creating instance disks...")
3575 a8083063 Iustin Pop
    if not _CreateDisks(self.cfg, iobj):
3576 a8083063 Iustin Pop
      _RemoveDisks(iobj, self.cfg)
3577 a1578d63 Iustin Pop
      self.cfg.ReleaseDRBDMinors(instance)
3578 3ecf6786 Iustin Pop
      raise errors.OpExecError("Device creation failed, reverting...")
3579 a8083063 Iustin Pop
3580 a8083063 Iustin Pop
    feedback_fn("adding instance %s to cluster config" % instance)
3581 a8083063 Iustin Pop
3582 a8083063 Iustin Pop
    self.cfg.AddInstance(iobj)
3583 7baf741d Guido Trotter
    # Declare that we don't want to remove the instance lock anymore, as we've
3584 7baf741d Guido Trotter
    # added the instance to the config
3585 7baf741d Guido Trotter
    del self.remove_locks[locking.LEVEL_INSTANCE]
3586 a1578d63 Iustin Pop
    # Remove the temp. assignments for the instance's drbds
3587 a1578d63 Iustin Pop
    self.cfg.ReleaseDRBDMinors(instance)
3588 a8083063 Iustin Pop
3589 a8083063 Iustin Pop
    if self.op.wait_for_sync:
3590 5bfac263 Iustin Pop
      disk_abort = not _WaitForSync(self.cfg, iobj, self.proc)
3591 a1f445d3 Iustin Pop
    elif iobj.disk_template in constants.DTS_NET_MIRROR:
3592 a8083063 Iustin Pop
      # make sure the disks are not degraded (still sync-ing is ok)
3593 a8083063 Iustin Pop
      time.sleep(15)
3594 a8083063 Iustin Pop
      feedback_fn("* checking mirrors status")
3595 5bfac263 Iustin Pop
      disk_abort = not _WaitForSync(self.cfg, iobj, self.proc, oneshot=True)
3596 a8083063 Iustin Pop
    else:
3597 a8083063 Iustin Pop
      disk_abort = False
3598 a8083063 Iustin Pop
3599 a8083063 Iustin Pop
    if disk_abort:
3600 a8083063 Iustin Pop
      _RemoveDisks(iobj, self.cfg)
3601 a8083063 Iustin Pop
      self.cfg.RemoveInstance(iobj.name)
3602 7baf741d Guido Trotter
      # Make sure the instance lock gets removed
3603 7baf741d Guido Trotter
      self.remove_locks[locking.LEVEL_INSTANCE] = iobj.name
3604 3ecf6786 Iustin Pop
      raise errors.OpExecError("There are some degraded disks for"
3605 3ecf6786 Iustin Pop
                               " this instance")
3606 a8083063 Iustin Pop
3607 a8083063 Iustin Pop
    feedback_fn("creating os for instance %s on node %s" %
3608 a8083063 Iustin Pop
                (instance, pnode_name))
3609 a8083063 Iustin Pop
3610 a8083063 Iustin Pop
    if iobj.disk_template != constants.DT_DISKLESS:
3611 a8083063 Iustin Pop
      if self.op.mode == constants.INSTANCE_CREATE:
3612 a8083063 Iustin Pop
        feedback_fn("* running the instance OS create scripts...")
3613 a8083063 Iustin Pop
        if not rpc.call_instance_os_add(pnode_name, iobj, "sda", "sdb"):
3614 3ecf6786 Iustin Pop
          raise errors.OpExecError("could not add os for instance %s"
3615 3ecf6786 Iustin Pop
                                   " on node %s" %
3616 3ecf6786 Iustin Pop
                                   (instance, pnode_name))
3617 a8083063 Iustin Pop
3618 a8083063 Iustin Pop
      elif self.op.mode == constants.INSTANCE_IMPORT:
3619 a8083063 Iustin Pop
        feedback_fn("* running the instance OS import scripts...")
3620 a8083063 Iustin Pop
        src_node = self.op.src_node
3621 a8083063 Iustin Pop
        src_image = self.src_image
3622 62c9ec92 Iustin Pop
        cluster_name = self.cfg.GetClusterName()
3623 a8083063 Iustin Pop
        if not rpc.call_instance_os_import(pnode_name, iobj, "sda", "sdb",
3624 62c9ec92 Iustin Pop
                                           src_node, src_image, cluster_name):
3625 3ecf6786 Iustin Pop
          raise errors.OpExecError("Could not import os for instance"
3626 3ecf6786 Iustin Pop
                                   " %s on node %s" %
3627 3ecf6786 Iustin Pop
                                   (instance, pnode_name))
3628 a8083063 Iustin Pop
      else:
3629 a8083063 Iustin Pop
        # also checked in the prereq part
3630 3ecf6786 Iustin Pop
        raise errors.ProgrammerError("Unknown OS initialization mode '%s'"
3631 3ecf6786 Iustin Pop
                                     % self.op.mode)
3632 a8083063 Iustin Pop
3633 a8083063 Iustin Pop
    if self.op.start:
3634 a8083063 Iustin Pop
      logger.Info("starting instance %s on node %s" % (instance, pnode_name))
3635 a8083063 Iustin Pop
      feedback_fn("* starting instance...")
3636 a8083063 Iustin Pop
      if not rpc.call_instance_start(pnode_name, iobj, None):
3637 3ecf6786 Iustin Pop
        raise errors.OpExecError("Could not start instance")
3638 a8083063 Iustin Pop
3639 a8083063 Iustin Pop
3640 a8083063 Iustin Pop
class LUConnectConsole(NoHooksLU):
3641 a8083063 Iustin Pop
  """Connect to an instance's console.
3642 a8083063 Iustin Pop

3643 a8083063 Iustin Pop
  This is somewhat special in that it returns the command line that
3644 a8083063 Iustin Pop
  you need to run on the master node in order to connect to the
3645 a8083063 Iustin Pop
  console.
3646 a8083063 Iustin Pop

3647 a8083063 Iustin Pop
  """
3648 a8083063 Iustin Pop
  _OP_REQP = ["instance_name"]
3649 8659b73e Guido Trotter
  REQ_BGL = False
3650 8659b73e Guido Trotter
3651 8659b73e Guido Trotter
  def ExpandNames(self):
3652 8659b73e Guido Trotter
    self._ExpandAndLockInstance()
3653 a8083063 Iustin Pop
3654 a8083063 Iustin Pop
  def CheckPrereq(self):
3655 a8083063 Iustin Pop
    """Check prerequisites.
3656 a8083063 Iustin Pop

3657 a8083063 Iustin Pop
    This checks that the instance is in the cluster.
3658 a8083063 Iustin Pop

3659 a8083063 Iustin Pop
    """
3660 8659b73e Guido Trotter
    self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
3661 8659b73e Guido Trotter
    assert self.instance is not None, \
3662 8659b73e Guido Trotter
      "Cannot retrieve locked instance %s" % self.op.instance_name
3663 a8083063 Iustin Pop
3664 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
3665 a8083063 Iustin Pop
    """Connect to the console of an instance
3666 a8083063 Iustin Pop

3667 a8083063 Iustin Pop
    """
3668 a8083063 Iustin Pop
    instance = self.instance
3669 a8083063 Iustin Pop
    node = instance.primary_node
3670 a8083063 Iustin Pop
3671 e69d05fd Iustin Pop
    node_insts = rpc.call_instance_list([node],
3672 e69d05fd Iustin Pop
                                        [instance.hypervisor])[node]
3673 a8083063 Iustin Pop
    if node_insts is False:
3674 3ecf6786 Iustin Pop
      raise errors.OpExecError("Can't connect to node %s." % node)
3675 a8083063 Iustin Pop
3676 a8083063 Iustin Pop
    if instance.name not in node_insts:
3677 3ecf6786 Iustin Pop
      raise errors.OpExecError("Instance %s is not running." % instance.name)
3678 a8083063 Iustin Pop
3679 a8083063 Iustin Pop
    logger.Debug("connecting to console of %s on %s" % (instance.name, node))
3680 a8083063 Iustin Pop
3681 e69d05fd Iustin Pop
    hyper = hypervisor.GetHypervisor(instance.hypervisor)
3682 30989e69 Alexander Schreiber
    console_cmd = hyper.GetShellCommandForConsole(instance)
3683 b047857b Michael Hanselmann
3684 82122173 Iustin Pop
    # build ssh cmdline
3685 0a80a26f Michael Hanselmann
    return self.ssh.BuildCmd(node, "root", console_cmd, batch=True, tty=True)
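    # Illustrative shape of the result (the exact ssh flags are an assumption):
    #   ssh -t root@<primary node> '<hypervisor-specific console command>'
    # The caller is expected to run this command line on the master node, as
    # described in the class docstring.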
3686 a8083063 Iustin Pop
3687 a8083063 Iustin Pop
3688 a8083063 Iustin Pop
class LUReplaceDisks(LogicalUnit):
3689 a8083063 Iustin Pop
  """Replace the disks of an instance.
3690 a8083063 Iustin Pop

3691 a8083063 Iustin Pop
  """
3692 a8083063 Iustin Pop
  HPATH = "mirrors-replace"
3693 a8083063 Iustin Pop
  HTYPE = constants.HTYPE_INSTANCE
3694 a9e0c397 Iustin Pop
  _OP_REQP = ["instance_name", "mode", "disks"]
3695 efd990e4 Guido Trotter
  REQ_BGL = False
3696 efd990e4 Guido Trotter
3697 efd990e4 Guido Trotter
  def ExpandNames(self):
3698 efd990e4 Guido Trotter
    self._ExpandAndLockInstance()
3699 efd990e4 Guido Trotter
3700 efd990e4 Guido Trotter
    if not hasattr(self.op, "remote_node"):
3701 efd990e4 Guido Trotter
      self.op.remote_node = None
3702 efd990e4 Guido Trotter
3703 efd990e4 Guido Trotter
    ia_name = getattr(self.op, "iallocator", None)
3704 efd990e4 Guido Trotter
    if ia_name is not None:
3705 efd990e4 Guido Trotter
      if self.op.remote_node is not None:
3706 efd990e4 Guido Trotter
        raise errors.OpPrereqError("Give either the iallocator or the new"
3707 efd990e4 Guido Trotter
                                   " secondary, not both")
3708 efd990e4 Guido Trotter
      self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
3709 efd990e4 Guido Trotter
    elif self.op.remote_node is not None:
3710 efd990e4 Guido Trotter
      remote_node = self.cfg.ExpandNodeName(self.op.remote_node)
3711 efd990e4 Guido Trotter
      if remote_node is None:
3712 efd990e4 Guido Trotter
        raise errors.OpPrereqError("Node '%s' not known" %
3713 efd990e4 Guido Trotter
                                   self.op.remote_node)
3714 efd990e4 Guido Trotter
      self.op.remote_node = remote_node
3715 efd990e4 Guido Trotter
      self.needed_locks[locking.LEVEL_NODE] = [remote_node]
3716 efd990e4 Guido Trotter
      self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_APPEND
3717 efd990e4 Guido Trotter
    else:
3718 efd990e4 Guido Trotter
      self.needed_locks[locking.LEVEL_NODE] = []
3719 efd990e4 Guido Trotter
      self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
3720 efd990e4 Guido Trotter
3721 efd990e4 Guido Trotter
  def DeclareLocks(self, level):
3722 efd990e4 Guido Trotter
    # If we're not already locking all nodes in the set we have to declare the
3723 efd990e4 Guido Trotter
    # instance's primary/secondary nodes.
3724 efd990e4 Guido Trotter
    if (level == locking.LEVEL_NODE and
3725 efd990e4 Guido Trotter
        self.needed_locks[locking.LEVEL_NODE] is not locking.ALL_SET):
3726 efd990e4 Guido Trotter
      self._LockInstancesNodes()
3727 a8083063 Iustin Pop
3728 b6e82a65 Iustin Pop
  def _RunAllocator(self):
3729 b6e82a65 Iustin Pop
    """Compute a new secondary node using an IAllocator.
3730 b6e82a65 Iustin Pop

3731 b6e82a65 Iustin Pop
    """
3732 d6a02168 Michael Hanselmann
    ial = IAllocator(self.cfg,
3733 b6e82a65 Iustin Pop
                     mode=constants.IALLOCATOR_MODE_RELOC,
3734 b6e82a65 Iustin Pop
                     name=self.op.instance_name,
3735 b6e82a65 Iustin Pop
                     relocate_from=[self.sec_node])
3736 b6e82a65 Iustin Pop
3737 b6e82a65 Iustin Pop
    ial.Run(self.op.iallocator)
3738 b6e82a65 Iustin Pop
3739 b6e82a65 Iustin Pop
    if not ial.success:
3740 b6e82a65 Iustin Pop
      raise errors.OpPrereqError("Can't compute nodes using"
3741 b6e82a65 Iustin Pop
                                 " iallocator '%s': %s" % (self.op.iallocator,
3742 b6e82a65 Iustin Pop
                                                           ial.info))
3743 b6e82a65 Iustin Pop
    if len(ial.nodes) != ial.required_nodes:
3744 b6e82a65 Iustin Pop
      raise errors.OpPrereqError("iallocator '%s' returned invalid number"
3745 b6e82a65 Iustin Pop
                                 " of nodes (%s), required %s" %
3746 b6e82a65 Iustin Pop
                                 (self.op.iallocator, len(ial.nodes),
                                  ial.required_nodes))
3747 b6e82a65 Iustin Pop
    self.op.remote_node = ial.nodes[0]
3748 b6e82a65 Iustin Pop
    logger.ToStdout("Selected new secondary for the instance: %s" %
3749 b6e82a65 Iustin Pop
                    self.op.remote_node)
3750 b6e82a65 Iustin Pop
3751 a8083063 Iustin Pop
  def BuildHooksEnv(self):
3752 a8083063 Iustin Pop
    """Build hooks env.
3753 a8083063 Iustin Pop

3754 a8083063 Iustin Pop
    This runs on the master, the primary and all the secondaries.
3755 a8083063 Iustin Pop

3756 a8083063 Iustin Pop
    """
3757 a8083063 Iustin Pop
    env = {
3758 a9e0c397 Iustin Pop
      "MODE": self.op.mode,
3759 a8083063 Iustin Pop
      "NEW_SECONDARY": self.op.remote_node,
3760 a8083063 Iustin Pop
      "OLD_SECONDARY": self.instance.secondary_nodes[0],
3761 a8083063 Iustin Pop
      }
3762 396e1b78 Michael Hanselmann
    env.update(_BuildInstanceHookEnvByObject(self.instance))
3763 0834c866 Iustin Pop
    nl = [
3764 d6a02168 Michael Hanselmann
      self.cfg.GetMasterNode(),
3765 0834c866 Iustin Pop
      self.instance.primary_node,
3766 0834c866 Iustin Pop
      ]
3767 0834c866 Iustin Pop
    if self.op.remote_node is not None:
3768 0834c866 Iustin Pop
      nl.append(self.op.remote_node)
3769 a8083063 Iustin Pop
    return env, nl, nl
3770 a8083063 Iustin Pop
3771 a8083063 Iustin Pop
  def CheckPrereq(self):
3772 a8083063 Iustin Pop
    """Check prerequisites.
3773 a8083063 Iustin Pop

3774 a8083063 Iustin Pop
    This checks that the instance is in the cluster.
3775 a8083063 Iustin Pop

3776 a8083063 Iustin Pop
    """
3777 efd990e4 Guido Trotter
    instance = self.cfg.GetInstanceInfo(self.op.instance_name)
3778 efd990e4 Guido Trotter
    assert instance is not None, \
3779 efd990e4 Guido Trotter
      "Cannot retrieve locked instance %s" % self.op.instance_name
3780 a8083063 Iustin Pop
    self.instance = instance
3781 a8083063 Iustin Pop
3782 a9e0c397 Iustin Pop
    if instance.disk_template not in constants.DTS_NET_MIRROR:
3783 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("Instance's disk layout is not"
3784 a9e0c397 Iustin Pop
                                 " network mirrored.")
3785 a8083063 Iustin Pop
3786 a8083063 Iustin Pop
    if len(instance.secondary_nodes) != 1:
3787 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("The instance has a strange layout,"
3788 3ecf6786 Iustin Pop
                                 " expected one secondary but found %d" %
3789 3ecf6786 Iustin Pop
                                 len(instance.secondary_nodes))
3790 a8083063 Iustin Pop
3791 a9e0c397 Iustin Pop
    self.sec_node = instance.secondary_nodes[0]
3792 a9e0c397 Iustin Pop
3793 b6e82a65 Iustin Pop
    ia_name = getattr(self.op, "iallocator", None)
3794 b6e82a65 Iustin Pop
    if ia_name is not None:
3795 de8c7666 Guido Trotter
      self._RunAllocator()
3796 b6e82a65 Iustin Pop
3797 b6e82a65 Iustin Pop
    remote_node = self.op.remote_node
3798 a9e0c397 Iustin Pop
    if remote_node is not None:
3799 a9e0c397 Iustin Pop
      self.remote_node_info = self.cfg.GetNodeInfo(remote_node)
3800 efd990e4 Guido Trotter
      assert self.remote_node_info is not None, \
3801 efd990e4 Guido Trotter
        "Cannot retrieve locked node %s" % remote_node
3802 a9e0c397 Iustin Pop
    else:
3803 a9e0c397 Iustin Pop
      self.remote_node_info = None
3804 a8083063 Iustin Pop
    if remote_node == instance.primary_node:
3805 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("The specified node is the primary node of"
3806 3ecf6786 Iustin Pop
                                 " the instance.")
3807 a9e0c397 Iustin Pop
    elif remote_node == self.sec_node:
3808 0834c866 Iustin Pop
      if self.op.mode == constants.REPLACE_DISK_SEC:
3809 0834c866 Iustin Pop
        # this is for DRBD8, where we can't execute the same mode of
3810 0834c866 Iustin Pop
        # replacement as for drbd7 (no different port allocated)
3811 0834c866 Iustin Pop
        raise errors.OpPrereqError("Same secondary given, cannot execute"
3812 0834c866 Iustin Pop
                                   " replacement")
3813 a9e0c397 Iustin Pop
    if instance.disk_template == constants.DT_DRBD8:
3814 7df43a76 Iustin Pop
      if (self.op.mode == constants.REPLACE_DISK_ALL and
3815 7df43a76 Iustin Pop
          remote_node is not None):
3816 7df43a76 Iustin Pop
        # switch to replace secondary mode
3817 7df43a76 Iustin Pop
        self.op.mode = constants.REPLACE_DISK_SEC
3818 7df43a76 Iustin Pop
3819 a9e0c397 Iustin Pop
      if self.op.mode == constants.REPLACE_DISK_ALL:
3820 12c3449a Michael Hanselmann
        raise errors.OpPrereqError("Template 'drbd' only allows primary or"
3821 a9e0c397 Iustin Pop
                                   " secondary disk replacement, not"
3822 a9e0c397 Iustin Pop
                                   " both at once")
3823 a9e0c397 Iustin Pop
      elif self.op.mode == constants.REPLACE_DISK_PRI:
3824 a9e0c397 Iustin Pop
        if remote_node is not None:
3825 12c3449a Michael Hanselmann
          raise errors.OpPrereqError("Template 'drbd' does not allow changing"
3826 a9e0c397 Iustin Pop
                                     " the secondary while doing a primary"
3827 a9e0c397 Iustin Pop
                                     " node disk replacement")
3828 a9e0c397 Iustin Pop
        self.tgt_node = instance.primary_node
3829 cff90b79 Iustin Pop
        self.oth_node = instance.secondary_nodes[0]
3830 a9e0c397 Iustin Pop
      elif self.op.mode == constants.REPLACE_DISK_SEC:
3831 a9e0c397 Iustin Pop
        self.new_node = remote_node # this can be None, in which case
3832 a9e0c397 Iustin Pop
                                    # we don't change the secondary
3833 a9e0c397 Iustin Pop
        self.tgt_node = instance.secondary_nodes[0]
3834 cff90b79 Iustin Pop
        self.oth_node = instance.primary_node
3835 a9e0c397 Iustin Pop
      else:
3836 a9e0c397 Iustin Pop
        raise errors.ProgrammerError("Unhandled disk replace mode")
3837 a9e0c397 Iustin Pop
3838 a9e0c397 Iustin Pop
    for name in self.op.disks:
3839 a9e0c397 Iustin Pop
      if instance.FindDisk(name) is None:
3840 a9e0c397 Iustin Pop
        raise errors.OpPrereqError("Disk '%s' not found for instance '%s'" %
3841 a9e0c397 Iustin Pop
                                   (name, instance.name))
3842 a8083063 Iustin Pop
3843 a9e0c397 Iustin Pop
  def _ExecD8DiskOnly(self, feedback_fn):
3844 a9e0c397 Iustin Pop
    """Replace a disk on the primary or secondary for dbrd8.
3845 a9e0c397 Iustin Pop

3846 a9e0c397 Iustin Pop
    The algorithm for replace is quite complicated:
3847 a9e0c397 Iustin Pop
      - for each disk to be replaced:
3848 a9e0c397 Iustin Pop
        - create new LVs on the target node with unique names
3849 a9e0c397 Iustin Pop
        - detach old LVs from the drbd device
3850 a9e0c397 Iustin Pop
        - rename old LVs to name_replaced.<time_t>
3851 a9e0c397 Iustin Pop
        - rename new LVs to old LVs
3852 a9e0c397 Iustin Pop
        - attach the new LVs (with the old names now) to the drbd device
3853 a9e0c397 Iustin Pop
      - wait for sync across all devices
3854 a9e0c397 Iustin Pop
      - for each modified disk:
3855 a9e0c397 Iustin Pop
        - remove old LVs (which have the name name_replaced.<time_t>)
3856 a9e0c397 Iustin Pop

3857 a9e0c397 Iustin Pop
    Failures are not very well handled.
3858 cff90b79 Iustin Pop

3859 a9e0c397 Iustin Pop
    """
3860 cff90b79 Iustin Pop
    steps_total = 6
3861 5bfac263 Iustin Pop
    warning, info = (self.proc.LogWarning, self.proc.LogInfo)
3862 a9e0c397 Iustin Pop
    instance = self.instance
3863 a9e0c397 Iustin Pop
    iv_names = {}
3864 a9e0c397 Iustin Pop
    vgname = self.cfg.GetVGName()
3865 a9e0c397 Iustin Pop
    # start of work
3866 a9e0c397 Iustin Pop
    cfg = self.cfg
3867 a9e0c397 Iustin Pop
    tgt_node = self.tgt_node
3868 cff90b79 Iustin Pop
    oth_node = self.oth_node
3869 cff90b79 Iustin Pop
3870 cff90b79 Iustin Pop
    # Step: check device activation
3871 5bfac263 Iustin Pop
    self.proc.LogStep(1, steps_total, "check device existence")
3872 cff90b79 Iustin Pop
    info("checking volume groups")
3873 cff90b79 Iustin Pop
    my_vg = cfg.GetVGName()
3874 cff90b79 Iustin Pop
    results = rpc.call_vg_list([oth_node, tgt_node])
3875 cff90b79 Iustin Pop
    if not results:
3876 cff90b79 Iustin Pop
      raise errors.OpExecError("Can't list volume groups on the nodes")
3877 cff90b79 Iustin Pop
    for node in oth_node, tgt_node:
3878 cff90b79 Iustin Pop
      res = results.get(node, False)
3879 cff90b79 Iustin Pop
      if not res or my_vg not in res:
3880 cff90b79 Iustin Pop
        raise errors.OpExecError("Volume group '%s' not found on %s" %
3881 cff90b79 Iustin Pop
                                 (my_vg, node))
3882 cff90b79 Iustin Pop
    for dev in instance.disks:
3883 cff90b79 Iustin Pop
      if not dev.iv_name in self.op.disks:
3884 cff90b79 Iustin Pop
        continue
3885 cff90b79 Iustin Pop
      for node in tgt_node, oth_node:
3886 cff90b79 Iustin Pop
        info("checking %s on %s" % (dev.iv_name, node))
3887 cff90b79 Iustin Pop
        cfg.SetDiskID(dev, node)
3888 cff90b79 Iustin Pop
        if not rpc.call_blockdev_find(node, dev):
3889 cff90b79 Iustin Pop
          raise errors.OpExecError("Can't find device %s on node %s" %
3890 cff90b79 Iustin Pop
                                   (dev.iv_name, node))
3891 cff90b79 Iustin Pop
3892 cff90b79 Iustin Pop
    # Step: check other node consistency
3893 5bfac263 Iustin Pop
    self.proc.LogStep(2, steps_total, "check peer consistency")
3894 cff90b79 Iustin Pop
    for dev in instance.disks:
3895 cff90b79 Iustin Pop
      if not dev.iv_name in self.op.disks:
3896 cff90b79 Iustin Pop
        continue
3897 cff90b79 Iustin Pop
      info("checking %s consistency on %s" % (dev.iv_name, oth_node))
3898 cff90b79 Iustin Pop
      if not _CheckDiskConsistency(self.cfg, dev, oth_node,
3899 cff90b79 Iustin Pop
                                   oth_node==instance.primary_node):
3900 cff90b79 Iustin Pop
        raise errors.OpExecError("Peer node (%s) has degraded storage, unsafe"
3901 cff90b79 Iustin Pop
                                 " to replace disks on this node (%s)" %
3902 cff90b79 Iustin Pop
                                 (oth_node, tgt_node))
3903 cff90b79 Iustin Pop
3904 cff90b79 Iustin Pop
    # Step: create new storage
3905 5bfac263 Iustin Pop
    self.proc.LogStep(3, steps_total, "allocate new storage")
3906 a9e0c397 Iustin Pop
    for dev in instance.disks:
3907 a9e0c397 Iustin Pop
      if not dev.iv_name in self.op.disks:
3908 a9e0c397 Iustin Pop
        continue
3909 a9e0c397 Iustin Pop
      size = dev.size
3910 a9e0c397 Iustin Pop
      cfg.SetDiskID(dev, tgt_node)
3911 a9e0c397 Iustin Pop
      lv_names = [".%s_%s" % (dev.iv_name, suf) for suf in ["data", "meta"]]
3912 a9e0c397 Iustin Pop
      names = _GenerateUniqueNames(cfg, lv_names)
3913 a9e0c397 Iustin Pop
      lv_data = objects.Disk(dev_type=constants.LD_LV, size=size,
3914 a9e0c397 Iustin Pop
                             logical_id=(vgname, names[0]))
3915 a9e0c397 Iustin Pop
      lv_meta = objects.Disk(dev_type=constants.LD_LV, size=128,
3916 a9e0c397 Iustin Pop
                             logical_id=(vgname, names[1]))
3917 a9e0c397 Iustin Pop
      new_lvs = [lv_data, lv_meta]
3918 a9e0c397 Iustin Pop
      old_lvs = dev.children
3919 a9e0c397 Iustin Pop
      iv_names[dev.iv_name] = (dev, old_lvs, new_lvs)
3920 cff90b79 Iustin Pop
      info("creating new local storage on %s for %s" %
3921 cff90b79 Iustin Pop
           (tgt_node, dev.iv_name))
3922 a9e0c397 Iustin Pop
      # since we *always* want to create this LV, we use the
3923 a9e0c397 Iustin Pop
      # _Create...OnPrimary (which forces the creation), even if we
3924 a9e0c397 Iustin Pop
      # are talking about the secondary node
3925 a9e0c397 Iustin Pop
      for new_lv in new_lvs:
3926 3f78eef2 Iustin Pop
        if not _CreateBlockDevOnPrimary(cfg, tgt_node, instance, new_lv,
3927 a9e0c397 Iustin Pop
                                        _GetInstanceInfoText(instance)):
3928 a9e0c397 Iustin Pop
          raise errors.OpExecError("Failed to create new LV named '%s' on"
3929 a9e0c397 Iustin Pop
                                   " node '%s'" %
3930 a9e0c397 Iustin Pop
                                   (new_lv.logical_id[1], tgt_node))
3931 a9e0c397 Iustin Pop
3932 cff90b79 Iustin Pop
    # Step: for each lv, detach+rename*2+attach
3933 5bfac263 Iustin Pop
    self.proc.LogStep(4, steps_total, "change drbd configuration")
3934 cff90b79 Iustin Pop
    for dev, old_lvs, new_lvs in iv_names.itervalues():
3935 cff90b79 Iustin Pop
      info("detaching %s drbd from local storage" % dev.iv_name)
3936 a9e0c397 Iustin Pop
      if not rpc.call_blockdev_removechildren(tgt_node, dev, old_lvs):
3937 a9e0c397 Iustin Pop
        raise errors.OpExecError("Can't detach drbd from local storage on node"
3938 a9e0c397 Iustin Pop
                                 " %s for device %s" % (tgt_node, dev.iv_name))
3939 cff90b79 Iustin Pop
      #dev.children = []
3940 cff90b79 Iustin Pop
      #cfg.Update(instance)
3941 a9e0c397 Iustin Pop
3942 a9e0c397 Iustin Pop
      # ok, we created the new LVs, so now we know we have the needed
3943 a9e0c397 Iustin Pop
      # storage; as such, we proceed on the target node to rename
3944 a9e0c397 Iustin Pop
      # old_lv to _old, and new_lv to old_lv; note that we rename LVs
3945 c99a3cc0 Manuel Franceschini
      # using the assumption that logical_id == physical_id (which in
3946 a9e0c397 Iustin Pop
      # turn is the unique_id on that node)
3947 cff90b79 Iustin Pop
3948 cff90b79 Iustin Pop
      # FIXME(iustin): use a better name for the replaced LVs
3949 a9e0c397 Iustin Pop
      temp_suffix = int(time.time())
3950 a9e0c397 Iustin Pop
      ren_fn = lambda d, suff: (d.physical_id[0],
3951 a9e0c397 Iustin Pop
                                d.physical_id[1] + "_replaced-%s" % suff)
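      # illustration (hypothetical names): for a disk whose physical_id is
      # ("xenvg", "x.sda_data"), ren_fn yields
      # ("xenvg", "x.sda_data_replaced-<temp_suffix>"); only the LV name
      # changes, the volume group part is kept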
3952 cff90b79 Iustin Pop
      # build the rename list based on what LVs exist on the node
3953 cff90b79 Iustin Pop
      rlist = []
3954 cff90b79 Iustin Pop
      for to_ren in old_lvs:
3955 cff90b79 Iustin Pop
        find_res = rpc.call_blockdev_find(tgt_node, to_ren)
3956 cff90b79 Iustin Pop
        if find_res is not None: # device exists
3957 cff90b79 Iustin Pop
          rlist.append((to_ren, ren_fn(to_ren, temp_suffix)))
3958 cff90b79 Iustin Pop
3959 cff90b79 Iustin Pop
      info("renaming the old LVs on the target node")
3960 a9e0c397 Iustin Pop
      if not rpc.call_blockdev_rename(tgt_node, rlist):
3961 cff90b79 Iustin Pop
        raise errors.OpExecError("Can't rename old LVs on node %s" % tgt_node)
3962 a9e0c397 Iustin Pop
      # now we rename the new LVs to the old LVs
3963 cff90b79 Iustin Pop
      info("renaming the new LVs on the target node")
3964 a9e0c397 Iustin Pop
      rlist = [(new, old.physical_id) for old, new in zip(old_lvs, new_lvs)]
3965 a9e0c397 Iustin Pop
      if not rpc.call_blockdev_rename(tgt_node, rlist):
3966 cff90b79 Iustin Pop
        raise errors.OpExecError("Can't rename new LVs on node %s" % tgt_node)
3967 cff90b79 Iustin Pop
3968 cff90b79 Iustin Pop
      for old, new in zip(old_lvs, new_lvs):
3969 cff90b79 Iustin Pop
        new.logical_id = old.logical_id
3970 cff90b79 Iustin Pop
        cfg.SetDiskID(new, tgt_node)
3971 a9e0c397 Iustin Pop
3972 cff90b79 Iustin Pop
      for disk in old_lvs:
3973 cff90b79 Iustin Pop
        disk.logical_id = ren_fn(disk, temp_suffix)
3974 cff90b79 Iustin Pop
        cfg.SetDiskID(disk, tgt_node)
3975 a9e0c397 Iustin Pop
3976 a9e0c397 Iustin Pop
      # now that the new lvs have the old name, we can add them to the device
3977 cff90b79 Iustin Pop
      info("adding new mirror component on %s" % tgt_node)
3978 a9e0c397 Iustin Pop
      if not rpc.call_blockdev_addchildren(tgt_node, dev, new_lvs):
3979 a9e0c397 Iustin Pop
        for new_lv in new_lvs:
3980 a9e0c397 Iustin Pop
          if not rpc.call_blockdev_remove(tgt_node, new_lv):
3981 79caa9ed Guido Trotter
            warning("Can't rollback device %s", hint="manually cleanup unused"
3982 cff90b79 Iustin Pop
                    " logical volumes")
3983 cff90b79 Iustin Pop
        raise errors.OpExecError("Can't add local storage to drbd")
3984 a9e0c397 Iustin Pop
3985 a9e0c397 Iustin Pop
      dev.children = new_lvs
3986 a9e0c397 Iustin Pop
      cfg.Update(instance)
3987 a9e0c397 Iustin Pop
3988 cff90b79 Iustin Pop
    # Step: wait for sync
3989 a9e0c397 Iustin Pop
3990 a9e0c397 Iustin Pop
    # this can fail as the old devices are degraded and _WaitForSync
3991 a9e0c397 Iustin Pop
    # does a combined result over all disks, so we don't check its
3992 a9e0c397 Iustin Pop
    # return value
3993 5bfac263 Iustin Pop
    self.proc.LogStep(5, steps_total, "sync devices")
3994 5bfac263 Iustin Pop
    _WaitForSync(cfg, instance, self.proc, unlock=True)
3995 a9e0c397 Iustin Pop
3996 a9e0c397 Iustin Pop
    # so check manually all the devices
3997 a9e0c397 Iustin Pop
    for name, (dev, old_lvs, new_lvs) in iv_names.iteritems():
3998 a9e0c397 Iustin Pop
      cfg.SetDiskID(dev, instance.primary_node)
3999 a9e0c397 Iustin Pop
      is_degr = rpc.call_blockdev_find(instance.primary_node, dev)[5]
4000 a9e0c397 Iustin Pop
      if is_degr:
4001 a9e0c397 Iustin Pop
        raise errors.OpExecError("DRBD device %s is degraded!" % name)
4002 a9e0c397 Iustin Pop
4003 cff90b79 Iustin Pop
    # Step: remove old storage
4004 5bfac263 Iustin Pop
    self.proc.LogStep(6, steps_total, "removing old storage")
4005 a9e0c397 Iustin Pop
    for name, (dev, old_lvs, new_lvs) in iv_names.iteritems():
4006 cff90b79 Iustin Pop
      info("remove logical volumes for %s" % name)
4007 a9e0c397 Iustin Pop
      for lv in old_lvs:
4008 a9e0c397 Iustin Pop
        cfg.SetDiskID(lv, tgt_node)
4009 a9e0c397 Iustin Pop
        if not rpc.call_blockdev_remove(tgt_node, lv):
4010 79caa9ed Guido Trotter
          warning("Can't remove old LV", hint="manually remove unused LVs")
4011 a9e0c397 Iustin Pop
          continue
4012 a9e0c397 Iustin Pop
4013 a9e0c397 Iustin Pop
  def _ExecD8Secondary(self, feedback_fn):
4014 a9e0c397 Iustin Pop
    """Replace the secondary node for drbd8.
4015 a9e0c397 Iustin Pop

4016 a9e0c397 Iustin Pop
    The algorithm for replace is quite complicated:
4017 a9e0c397 Iustin Pop
      - for all disks of the instance:
4018 a9e0c397 Iustin Pop
        - create new LVs on the new node with same names
4019 a9e0c397 Iustin Pop
        - shutdown the drbd device on the old secondary
4020 a9e0c397 Iustin Pop
        - disconnect the drbd network on the primary
4021 a9e0c397 Iustin Pop
        - create the drbd device on the new secondary
4022 a9e0c397 Iustin Pop
        - network attach the drbd on the primary, using an artifice:
4023 a9e0c397 Iustin Pop
          the drbd code for Attach() will connect to the network if it
4024 a9e0c397 Iustin Pop
          finds a device which is connected to the good local disks but
4025 a9e0c397 Iustin Pop
          not network enabled
4026 a9e0c397 Iustin Pop
      - wait for sync across all devices
4027 a9e0c397 Iustin Pop
      - remove all disks from the old secondary
4028 a9e0c397 Iustin Pop

4029 a9e0c397 Iustin Pop
    Failures are not very well handled.
4030 0834c866 Iustin Pop

4031 a9e0c397 Iustin Pop
    """
4032 0834c866 Iustin Pop
    steps_total = 6
4033 5bfac263 Iustin Pop
    warning, info = (self.proc.LogWarning, self.proc.LogInfo)
4034 a9e0c397 Iustin Pop
    instance = self.instance
4035 a9e0c397 Iustin Pop
    iv_names = {}
4036 a9e0c397 Iustin Pop
    vgname = self.cfg.GetVGName()
4037 a9e0c397 Iustin Pop
    # start of work
4038 a9e0c397 Iustin Pop
    cfg = self.cfg
4039 a9e0c397 Iustin Pop
    old_node = self.tgt_node
4040 a9e0c397 Iustin Pop
    new_node = self.new_node
4041 a9e0c397 Iustin Pop
    pri_node = instance.primary_node
4042 0834c866 Iustin Pop
4043 0834c866 Iustin Pop
    # Step: check device activation
4044 5bfac263 Iustin Pop
    self.proc.LogStep(1, steps_total, "check device existence")
4045 0834c866 Iustin Pop
    info("checking volume groups")
4046 0834c866 Iustin Pop
    my_vg = cfg.GetVGName()
4047 0834c866 Iustin Pop
    results = rpc.call_vg_list([pri_node, new_node])
4048 0834c866 Iustin Pop
    if not results:
4049 0834c866 Iustin Pop
      raise errors.OpExecError("Can't list volume groups on the nodes")
4050 0834c866 Iustin Pop
    for node in pri_node, new_node:
4051 0834c866 Iustin Pop
      res = results.get(node, False)
4052 0834c866 Iustin Pop
      if not res or my_vg not in res:
4053 0834c866 Iustin Pop
        raise errors.OpExecError("Volume group '%s' not found on %s" %
4054 0834c866 Iustin Pop
                                 (my_vg, node))
4055 0834c866 Iustin Pop
    for dev in instance.disks:
4056 0834c866 Iustin Pop
      if not dev.iv_name in self.op.disks:
4057 0834c866 Iustin Pop
        continue
4058 0834c866 Iustin Pop
      info("checking %s on %s" % (dev.iv_name, pri_node))
4059 0834c866 Iustin Pop
      cfg.SetDiskID(dev, pri_node)
4060 0834c866 Iustin Pop
      if not rpc.call_blockdev_find(pri_node, dev):
4061 0834c866 Iustin Pop
        raise errors.OpExecError("Can't find device %s on node %s" %
4062 0834c866 Iustin Pop
                                 (dev.iv_name, pri_node))
4063 0834c866 Iustin Pop
4064 0834c866 Iustin Pop
    # Step: check other node consistency
4065 5bfac263 Iustin Pop
    self.proc.LogStep(2, steps_total, "check peer consistency")
4066 0834c866 Iustin Pop
    for dev in instance.disks:
4067 0834c866 Iustin Pop
      if not dev.iv_name in self.op.disks:
4068 0834c866 Iustin Pop
        continue
4069 0834c866 Iustin Pop
      info("checking %s consistency on %s" % (dev.iv_name, pri_node))
4070 0834c866 Iustin Pop
      if not _CheckDiskConsistency(self.cfg, dev, pri_node, True, ldisk=True):
4071 0834c866 Iustin Pop
        raise errors.OpExecError("Primary node (%s) has degraded storage,"
4072 0834c866 Iustin Pop
                                 " unsafe to replace the secondary" %
4073 0834c866 Iustin Pop
                                 pri_node)
4074 0834c866 Iustin Pop
4075 0834c866 Iustin Pop
    # Step: create new storage
4076 5bfac263 Iustin Pop
    self.proc.LogStep(3, steps_total, "allocate new storage")
4077 468b46f9 Iustin Pop
    for dev in instance.disks:
4078 a9e0c397 Iustin Pop
      size = dev.size
4079 0834c866 Iustin Pop
      info("adding new local storage on %s for %s" % (new_node, dev.iv_name))
4080 a9e0c397 Iustin Pop
      # since we *always* want to create this LV, we use the
4081 a9e0c397 Iustin Pop
      # _Create...OnPrimary (which forces the creation), even if we
4082 a9e0c397 Iustin Pop
      # are talking about the secondary node
4083 a9e0c397 Iustin Pop
      for new_lv in dev.children:
4084 3f78eef2 Iustin Pop
        if not _CreateBlockDevOnPrimary(cfg, new_node, instance, new_lv,
4085 a9e0c397 Iustin Pop
                                        _GetInstanceInfoText(instance)):
4086 a9e0c397 Iustin Pop
          raise errors.OpExecError("Failed to create new LV named '%s' on"
4087 a9e0c397 Iustin Pop
                                   " node '%s'" %
4088 a9e0c397 Iustin Pop
                                   (new_lv.logical_id[1], new_node))
4089 a9e0c397 Iustin Pop
4090 0834c866 Iustin Pop
4091 468b46f9 Iustin Pop
    # Step 4: drbd minors and drbd setup changes
4092 a1578d63 Iustin Pop
    # after this, we must manually remove the drbd minors on both the
4093 a1578d63 Iustin Pop
    # error and the success paths
4094 a1578d63 Iustin Pop
    minors = cfg.AllocateDRBDMinor([new_node for dev in instance.disks],
4095 a1578d63 Iustin Pop
                                   instance.name)
4096 468b46f9 Iustin Pop
    logging.debug("Allocated minors %s" % (minors,))
4097 5bfac263 Iustin Pop
    self.proc.LogStep(4, steps_total, "changing drbd configuration")
4098 468b46f9 Iustin Pop
    for dev, new_minor in zip(instance.disks, minors):
4099 0834c866 Iustin Pop
      size = dev.size
4100 0834c866 Iustin Pop
      info("activating a new drbd on %s for %s" % (new_node, dev.iv_name))
4101 a9e0c397 Iustin Pop
      # create new devices on new_node
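      # the drbd8 logical_id handled here appears to be the 6-tuple
      # (node_a, node_b, port, minor_a, minor_b, secret); only the minor
      # belonging to the replaced secondary changes, the rest is kept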
4102 ffa1c0dc Iustin Pop
      if pri_node == dev.logical_id[0]:
4103 ffa1c0dc Iustin Pop
        new_logical_id = (pri_node, new_node,
4104 f9518d38 Iustin Pop
                          dev.logical_id[2], dev.logical_id[3], new_minor,
4105 f9518d38 Iustin Pop
                          dev.logical_id[5])
4106 ffa1c0dc Iustin Pop
      else:
4107 ffa1c0dc Iustin Pop
        new_logical_id = (new_node, pri_node,
4108 f9518d38 Iustin Pop
                          dev.logical_id[2], new_minor, dev.logical_id[4],
4109 f9518d38 Iustin Pop
                          dev.logical_id[5])
4110 468b46f9 Iustin Pop
      iv_names[dev.iv_name] = (dev, dev.children, new_logical_id)
4111 a1578d63 Iustin Pop
      logging.debug("Allocated new_minor: %s, new_logical_id: %s", new_minor,
4112 a1578d63 Iustin Pop
                    new_logical_id)
4113 a9e0c397 Iustin Pop
      new_drbd = objects.Disk(dev_type=constants.LD_DRBD8,
4114 ffa1c0dc Iustin Pop
                              logical_id=new_logical_id,
4115 a9e0c397 Iustin Pop
                              children=dev.children)
4116 3f78eef2 Iustin Pop
      if not _CreateBlockDevOnSecondary(cfg, new_node, instance,
4117 3f78eef2 Iustin Pop
                                        new_drbd, False,
4118 a9e0c397 Iustin Pop
                                      _GetInstanceInfoText(instance)):
4119 a1578d63 Iustin Pop
        self.cfg.ReleaseDRBDMinors(instance.name)
4120 a9e0c397 Iustin Pop
        raise errors.OpExecError("Failed to create new DRBD on"
4121 a9e0c397 Iustin Pop
                                 " node '%s'" % new_node)
4122 a9e0c397 Iustin Pop
4123 0834c866 Iustin Pop
    for dev in instance.disks:
4124 a9e0c397 Iustin Pop
      # we have new devices, shutdown the drbd on the old secondary
4125 0834c866 Iustin Pop
      info("shutting down drbd for %s on old node" % dev.iv_name)
4126 a9e0c397 Iustin Pop
      cfg.SetDiskID(dev, old_node)
4127 a9e0c397 Iustin Pop
      if not rpc.call_blockdev_shutdown(old_node, dev):
4128 0834c866 Iustin Pop
        warning("Failed to shutdown drbd for %s on old node" % dev.iv_name,
4129 79caa9ed Guido Trotter
                hint="Please cleanup this device manually as soon as possible")
4130 a9e0c397 Iustin Pop
4131 642445d9 Iustin Pop
    info("detaching primary drbds from the network (=> standalone)")
4132 642445d9 Iustin Pop
    done = 0
4133 642445d9 Iustin Pop
    for dev in instance.disks:
4134 a9e0c397 Iustin Pop
      cfg.SetDiskID(dev, pri_node)
4135 f9518d38 Iustin Pop
      # set the network part of the physical (unique in bdev terms) id
4136 f9518d38 Iustin Pop
      # to None, meaning detach from network
4137 f9518d38 Iustin Pop
      dev.physical_id = (None, None, None, None) + dev.physical_id[4:]
4138 642445d9 Iustin Pop
      # and 'find' the device, which will 'fix' it to match the
4139 642445d9 Iustin Pop
      # standalone state
4140 642445d9 Iustin Pop
      if rpc.call_blockdev_find(pri_node, dev):
4141 642445d9 Iustin Pop
        done += 1
4142 642445d9 Iustin Pop
      else:
4143 642445d9 Iustin Pop
        warning("Failed to detach drbd %s from network, unusual case" %
4144 642445d9 Iustin Pop
                dev.iv_name)
4145 642445d9 Iustin Pop
4146 642445d9 Iustin Pop
    if not done:
4147 642445d9 Iustin Pop
      # no detaches succeeded (very unlikely)
4148 a1578d63 Iustin Pop
      self.cfg.ReleaseDRBDMinors(instance.name)
4149 642445d9 Iustin Pop
      raise errors.OpExecError("Can't detach at least one DRBD from old node")
4150 642445d9 Iustin Pop
4151 642445d9 Iustin Pop
    # if we managed to detach at least one, we update all the disks of
4152 642445d9 Iustin Pop
    # the instance to point to the new secondary
4153 642445d9 Iustin Pop
    info("updating instance configuration")
4154 468b46f9 Iustin Pop
    for dev, _, new_logical_id in iv_names.itervalues():
4155 468b46f9 Iustin Pop
      dev.logical_id = new_logical_id
4156 a9e0c397 Iustin Pop
      cfg.SetDiskID(dev, pri_node)
4157 642445d9 Iustin Pop
    cfg.Update(instance)
4158 a1578d63 Iustin Pop
    # we can remove now the temp minors as now the new values are
4159 a1578d63 Iustin Pop
    # written to the config file (and therefore stable)
4160 a1578d63 Iustin Pop
    self.cfg.ReleaseDRBDMinors(instance.name)
4161 a9e0c397 Iustin Pop
4162 642445d9 Iustin Pop
    # and now perform the drbd attach
4163 642445d9 Iustin Pop
    info("attaching primary drbds to new secondary (standalone => connected)")
4164 642445d9 Iustin Pop
    failures = []
4165 642445d9 Iustin Pop
    for dev in instance.disks:
4166 642445d9 Iustin Pop
      info("attaching primary drbd for %s to new secondary node" % dev.iv_name)
4167 642445d9 Iustin Pop
      # since the attach is smart, it's enough to 'find' the device,
4168 642445d9 Iustin Pop
      # it will automatically activate the network, if the physical_id
4169 642445d9 Iustin Pop
      # is correct
4170 642445d9 Iustin Pop
      cfg.SetDiskID(dev, pri_node)
4171 ffa1c0dc Iustin Pop
      logging.debug("Disk to attach: %s", dev)
4172 642445d9 Iustin Pop
      if not rpc.call_blockdev_find(pri_node, dev):
4173 642445d9 Iustin Pop
        warning("can't attach drbd %s to new secondary!" % dev.iv_name,
4174 642445d9 Iustin Pop
                "please do a gnt-instance info to see the status of disks")
4175 a9e0c397 Iustin Pop
4176 a9e0c397 Iustin Pop
    # this can fail as the old devices are degraded and _WaitForSync
4177 a9e0c397 Iustin Pop
    # does a combined result over all disks, so we don't check its
4178 a9e0c397 Iustin Pop
    # return value
4179 5bfac263 Iustin Pop
    self.proc.LogStep(5, steps_total, "sync devices")
4180 5bfac263 Iustin Pop
    _WaitForSync(cfg, instance, self.proc, unlock=True)
4181 a9e0c397 Iustin Pop
4182 a9e0c397 Iustin Pop
    # so check manually all the devices
4183 ffa1c0dc Iustin Pop
    for name, (dev, old_lvs, _) in iv_names.iteritems():
4184 a9e0c397 Iustin Pop
      cfg.SetDiskID(dev, pri_node)
4185 a9e0c397 Iustin Pop
      is_degr = rpc.call_blockdev_find(pri_node, dev)[5]
4186 a9e0c397 Iustin Pop
      if is_degr:
4187 a9e0c397 Iustin Pop
        raise errors.OpExecError("DRBD device %s is degraded!" % name)
4188 a9e0c397 Iustin Pop
4189 5bfac263 Iustin Pop
    self.proc.LogStep(6, steps_total, "removing old storage")
4190 ffa1c0dc Iustin Pop
    for name, (dev, old_lvs, _) in iv_names.iteritems():
4191 0834c866 Iustin Pop
      info("remove logical volumes for %s" % name)
4192 a9e0c397 Iustin Pop
      for lv in old_lvs:
4193 a9e0c397 Iustin Pop
        cfg.SetDiskID(lv, old_node)
4194 a9e0c397 Iustin Pop
        if not rpc.call_blockdev_remove(old_node, lv):
4195 0834c866 Iustin Pop
          warning("Can't remove LV on old secondary",
4196 79caa9ed Guido Trotter
                  hint="Cleanup stale volumes by hand")
4197 a9e0c397 Iustin Pop
4198 a9e0c397 Iustin Pop
  def Exec(self, feedback_fn):
4199 a9e0c397 Iustin Pop
    """Execute disk replacement.
4200 a9e0c397 Iustin Pop

4201 a9e0c397 Iustin Pop
    This dispatches the disk replacement to the appropriate handler.
4202 a9e0c397 Iustin Pop

4203 a9e0c397 Iustin Pop
    """
4204 a9e0c397 Iustin Pop
    instance = self.instance
4205 22985314 Guido Trotter
4206 22985314 Guido Trotter
    # Activate the instance disks if we're replacing them on a down instance
4207 22985314 Guido Trotter
    if instance.status == "down":
4208 023e3296 Guido Trotter
      _StartInstanceDisks(self.cfg, instance, True)
4209 22985314 Guido Trotter
4210 abdf0113 Iustin Pop
    if instance.disk_template == constants.DT_DRBD8:
4211 a9e0c397 Iustin Pop
      if self.op.remote_node is None:
4212 a9e0c397 Iustin Pop
        fn = self._ExecD8DiskOnly
4213 a9e0c397 Iustin Pop
      else:
4214 a9e0c397 Iustin Pop
        fn = self._ExecD8Secondary
4215 a9e0c397 Iustin Pop
    else:
4216 a9e0c397 Iustin Pop
      raise errors.ProgrammerError("Unhandled disk replacement case")
4217 22985314 Guido Trotter
4218 22985314 Guido Trotter
    ret = fn(feedback_fn)
4219 22985314 Guido Trotter
4220 22985314 Guido Trotter
    # Deactivate the instance disks if we're replacing them on a down instance
4221 22985314 Guido Trotter
    if instance.status == "down":
4222 023e3296 Guido Trotter
      _SafeShutdownInstanceDisks(instance, self.cfg)
4223 22985314 Guido Trotter
4224 22985314 Guido Trotter
    return ret
4225 a9e0c397 Iustin Pop
4226 a8083063 Iustin Pop
4227 8729e0d7 Iustin Pop
class LUGrowDisk(LogicalUnit):
4228 8729e0d7 Iustin Pop
  """Grow a disk of an instance.
4229 8729e0d7 Iustin Pop

4230 8729e0d7 Iustin Pop
  """
4231 8729e0d7 Iustin Pop
  HPATH = "disk-grow"
4232 8729e0d7 Iustin Pop
  HTYPE = constants.HTYPE_INSTANCE
4233 8729e0d7 Iustin Pop
  _OP_REQP = ["instance_name", "disk", "amount"]
4234 31e63dbf Guido Trotter
  REQ_BGL = False
4235 31e63dbf Guido Trotter
4236 31e63dbf Guido Trotter
  def ExpandNames(self):
4237 31e63dbf Guido Trotter
    self._ExpandAndLockInstance()
4238 31e63dbf Guido Trotter
    self.needed_locks[locking.LEVEL_NODE] = []
4239 f6d9a522 Guido Trotter
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
4240 31e63dbf Guido Trotter
4241 31e63dbf Guido Trotter
  def DeclareLocks(self, level):
4242 31e63dbf Guido Trotter
    if level == locking.LEVEL_NODE:
4243 31e63dbf Guido Trotter
      self._LockInstancesNodes()
4244 8729e0d7 Iustin Pop
4245 8729e0d7 Iustin Pop
  def BuildHooksEnv(self):
4246 8729e0d7 Iustin Pop
    """Build hooks env.
4247 8729e0d7 Iustin Pop

4248 8729e0d7 Iustin Pop
    This runs on the master, the primary and all the secondaries.
4249 8729e0d7 Iustin Pop

4250 8729e0d7 Iustin Pop
    """
4251 8729e0d7 Iustin Pop
    env = {
4252 8729e0d7 Iustin Pop
      "DISK": self.op.disk,
4253 8729e0d7 Iustin Pop
      "AMOUNT": self.op.amount,
4254 8729e0d7 Iustin Pop
      }
4255 8729e0d7 Iustin Pop
    env.update(_BuildInstanceHookEnvByObject(self.instance))
4256 8729e0d7 Iustin Pop
    nl = [
4257 d6a02168 Michael Hanselmann
      self.cfg.GetMasterNode(),
4258 8729e0d7 Iustin Pop
      self.instance.primary_node,
4259 8729e0d7 Iustin Pop
      ]
4260 8729e0d7 Iustin Pop
    return env, nl, nl
4261 8729e0d7 Iustin Pop
4262 8729e0d7 Iustin Pop
  def CheckPrereq(self):
4263 8729e0d7 Iustin Pop
    """Check prerequisites.
4264 8729e0d7 Iustin Pop

4265 8729e0d7 Iustin Pop
    This checks that the instance is in the cluster.
4266 8729e0d7 Iustin Pop

4267 8729e0d7 Iustin Pop
    """
4268 31e63dbf Guido Trotter
    instance = self.cfg.GetInstanceInfo(self.op.instance_name)
4269 31e63dbf Guido Trotter
    assert instance is not None, \
4270 31e63dbf Guido Trotter
      "Cannot retrieve locked instance %s" % self.op.instance_name
4271 31e63dbf Guido Trotter
4272 8729e0d7 Iustin Pop
    self.instance = instance
4273 8729e0d7 Iustin Pop
4274 8729e0d7 Iustin Pop
    if instance.disk_template not in (constants.DT_PLAIN, constants.DT_DRBD8):
4275 8729e0d7 Iustin Pop
      raise errors.OpPrereqError("Instance's disk layout does not support"
4276 8729e0d7 Iustin Pop
                                 " growing.")
4277 8729e0d7 Iustin Pop
4278 8729e0d7 Iustin Pop
    if instance.FindDisk(self.op.disk) is None:
4279 8729e0d7 Iustin Pop
      raise errors.OpPrereqError("Disk '%s' not found for instance '%s'" %
4280 c7cdfc90 Iustin Pop
                                 (self.op.disk, instance.name))
4281 8729e0d7 Iustin Pop
4282 8729e0d7 Iustin Pop
    nodenames = [instance.primary_node] + list(instance.secondary_nodes)
4283 e69d05fd Iustin Pop
    nodeinfo = rpc.call_node_info(nodenames, self.cfg.GetVGName(),
4284 e69d05fd Iustin Pop
                                  instance.hypervisor)
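    # the disk must be able to grow by op.amount MiB on every node holding
    # it (primary and secondaries), so check the reported vg_free on each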
4285 8729e0d7 Iustin Pop
    for node in nodenames:
4286 8729e0d7 Iustin Pop
      info = nodeinfo.get(node, None)
4287 8729e0d7 Iustin Pop
      if not info:
4288 8729e0d7 Iustin Pop
        raise errors.OpPrereqError("Cannot get current information"
4289 8729e0d7 Iustin Pop
                                   " from node '%s'" % node)
4290 8729e0d7 Iustin Pop
      vg_free = info.get('vg_free', None)
4291 8729e0d7 Iustin Pop
      if not isinstance(vg_free, int):
4292 8729e0d7 Iustin Pop
        raise errors.OpPrereqError("Can't compute free disk space on"
4293 8729e0d7 Iustin Pop
                                   " node %s" % node)
4294 8729e0d7 Iustin Pop
      if self.op.amount > info['vg_free']:
4295 8729e0d7 Iustin Pop
        raise errors.OpPrereqError("Not enough disk space on target node %s:"
4296 8729e0d7 Iustin Pop
                                   " %d MiB available, %d MiB required" %
4297 8729e0d7 Iustin Pop
                                   (node, info['vg_free'], self.op.amount))
4298 8729e0d7 Iustin Pop
4299 8729e0d7 Iustin Pop
  def Exec(self, feedback_fn):
4300 8729e0d7 Iustin Pop
    """Execute disk grow.
4301 8729e0d7 Iustin Pop

4302 8729e0d7 Iustin Pop
    """
4303 8729e0d7 Iustin Pop
    instance = self.instance
4304 8729e0d7 Iustin Pop
    disk = instance.FindDisk(self.op.disk)
4305 8729e0d7 Iustin Pop
    for node in (instance.secondary_nodes + (instance.primary_node,)):
4306 8729e0d7 Iustin Pop
      self.cfg.SetDiskID(disk, node)
4307 8729e0d7 Iustin Pop
      result = rpc.call_blockdev_grow(node, disk, self.op.amount)
4308 86de84dd Guido Trotter
      if (not result or not isinstance(result, (list, tuple)) or
          len(result) != 2):
4309 8729e0d7 Iustin Pop
        raise errors.OpExecError("grow request failed to node %s" % node)
4310 8729e0d7 Iustin Pop
      elif not result[0]:
4311 8729e0d7 Iustin Pop
        raise errors.OpExecError("grow request failed to node %s: %s" %
4312 8729e0d7 Iustin Pop
                                 (node, result[1]))
4313 8729e0d7 Iustin Pop
    disk.RecordGrow(self.op.amount)
4314 8729e0d7 Iustin Pop
    self.cfg.Update(instance)
4315 8729e0d7 Iustin Pop
    return
4316 8729e0d7 Iustin Pop
4317 8729e0d7 Iustin Pop
4318 a8083063 Iustin Pop
class LUQueryInstanceData(NoHooksLU):
4319 a8083063 Iustin Pop
  """Query runtime instance data.
4320 a8083063 Iustin Pop

4321 a8083063 Iustin Pop
  """
4322 a8083063 Iustin Pop
  _OP_REQP = ["instances"]
4323 a987fa48 Guido Trotter
  REQ_BGL = False
4324 ae5849b5 Michael Hanselmann
4325 a987fa48 Guido Trotter
  def ExpandNames(self):
4326 a987fa48 Guido Trotter
    self.needed_locks = {}
4327 a987fa48 Guido Trotter
    self.share_locks = dict(((i, 1) for i in locking.LEVELS))
4328 a987fa48 Guido Trotter
4329 a987fa48 Guido Trotter
    if not isinstance(self.op.instances, list):
4330 a987fa48 Guido Trotter
      raise errors.OpPrereqError("Invalid argument type 'instances'")
4331 a987fa48 Guido Trotter
4332 a987fa48 Guido Trotter
    if self.op.instances:
4333 a987fa48 Guido Trotter
      self.wanted_names = []
4334 a987fa48 Guido Trotter
      for name in self.op.instances:
4335 a987fa48 Guido Trotter
        full_name = self.cfg.ExpandInstanceName(name)
4336 a987fa48 Guido Trotter
        if full_name is None:
4337 a987fa48 Guido Trotter
          raise errors.OpPrereqError("Instance '%s' not known" %
4338 a987fa48 Guido Trotter
                                     name)
4339 a987fa48 Guido Trotter
        self.wanted_names.append(full_name)
4340 a987fa48 Guido Trotter
      self.needed_locks[locking.LEVEL_INSTANCE] = self.wanted_names
4341 a987fa48 Guido Trotter
    else:
4342 a987fa48 Guido Trotter
      self.wanted_names = None
4343 a987fa48 Guido Trotter
      self.needed_locks[locking.LEVEL_INSTANCE] = locking.ALL_SET
4344 a987fa48 Guido Trotter
4345 a987fa48 Guido Trotter
    self.needed_locks[locking.LEVEL_NODE] = []
4346 a987fa48 Guido Trotter
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
4347 a987fa48 Guido Trotter
4348 a987fa48 Guido Trotter
  def DeclareLocks(self, level):
4349 a987fa48 Guido Trotter
    if level == locking.LEVEL_NODE:
4350 a987fa48 Guido Trotter
      self._LockInstancesNodes()
4351 a8083063 Iustin Pop
4352 a8083063 Iustin Pop
  def CheckPrereq(self):
4353 a8083063 Iustin Pop
    """Check prerequisites.
4354 a8083063 Iustin Pop

4355 a8083063 Iustin Pop
    This only checks the optional instance list against the existing names.
4356 a8083063 Iustin Pop

4357 a8083063 Iustin Pop
    """
4358 a987fa48 Guido Trotter
    if self.wanted_names is None:
4359 a987fa48 Guido Trotter
      self.wanted_names = self.acquired_locks[locking.LEVEL_INSTANCE]
4360 a8083063 Iustin Pop
4361 a987fa48 Guido Trotter
    self.wanted_instances = [self.cfg.GetInstanceInfo(name) for name
4362 a987fa48 Guido Trotter
                             in self.wanted_names]
4363 a987fa48 Guido Trotter
    return
4364 a8083063 Iustin Pop
4365 a8083063 Iustin Pop
  def _ComputeDiskStatus(self, instance, snode, dev):
4366 a8083063 Iustin Pop
    """Compute block device status.
4367 a8083063 Iustin Pop

4368 a8083063 Iustin Pop
    """
4369 a8083063 Iustin Pop
    self.cfg.SetDiskID(dev, instance.primary_node)
4370 a8083063 Iustin Pop
    dev_pstatus = rpc.call_blockdev_find(instance.primary_node, dev)
4371 a1f445d3 Iustin Pop
    if dev.dev_type in constants.LDS_DRBD:
4372 a8083063 Iustin Pop
      # we change the snode then (otherwise we use the one passed in)
4373 a8083063 Iustin Pop
      if dev.logical_id[0] == instance.primary_node:
4374 a8083063 Iustin Pop
        snode = dev.logical_id[1]
4375 a8083063 Iustin Pop
      else:
4376 a8083063 Iustin Pop
        snode = dev.logical_id[0]
4377 a8083063 Iustin Pop
4378 a8083063 Iustin Pop
    if snode:
4379 a8083063 Iustin Pop
      self.cfg.SetDiskID(dev, snode)
4380 a8083063 Iustin Pop
      dev_sstatus = rpc.call_blockdev_find(snode, dev)
4381 a8083063 Iustin Pop
    else:
4382 a8083063 Iustin Pop
      dev_sstatus = None
4383 a8083063 Iustin Pop
4384 a8083063 Iustin Pop
    if dev.children:
4385 a8083063 Iustin Pop
      dev_children = [self._ComputeDiskStatus(instance, snode, child)
4386 a8083063 Iustin Pop
                      for child in dev.children]
4387 a8083063 Iustin Pop
    else:
4388 a8083063 Iustin Pop
      dev_children = []
4389 a8083063 Iustin Pop
4390 a8083063 Iustin Pop
    data = {
4391 a8083063 Iustin Pop
      "iv_name": dev.iv_name,
4392 a8083063 Iustin Pop
      "dev_type": dev.dev_type,
4393 a8083063 Iustin Pop
      "logical_id": dev.logical_id,
4394 a8083063 Iustin Pop
      "physical_id": dev.physical_id,
4395 a8083063 Iustin Pop
      "pstatus": dev_pstatus,
4396 a8083063 Iustin Pop
      "sstatus": dev_sstatus,
4397 a8083063 Iustin Pop
      "children": dev_children,
4398 a8083063 Iustin Pop
      }
4399 a8083063 Iustin Pop
4400 a8083063 Iustin Pop
    return data
4401 a8083063 Iustin Pop
4402 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
4403 a8083063 Iustin Pop
    """Gather and return data"""
4404 a8083063 Iustin Pop
    result = {}
4405 a8083063 Iustin Pop
    for instance in self.wanted_instances:
4406 a8083063 Iustin Pop
      remote_info = rpc.call_instance_info(instance.primary_node,
4407 e69d05fd Iustin Pop
                                           instance.name,
4408 e69d05fd Iustin Pop
                                           instance.hypervisor)
4409 a8083063 Iustin Pop
      if remote_info and "state" in remote_info:
4410 a8083063 Iustin Pop
        remote_state = "up"
4411 a8083063 Iustin Pop
      else:
4412 a8083063 Iustin Pop
        remote_state = "down"
4413 a8083063 Iustin Pop
      if instance.status == "down":
4414 a8083063 Iustin Pop
        config_state = "down"
4415 a8083063 Iustin Pop
      else:
4416 a8083063 Iustin Pop
        config_state = "up"
4417 a8083063 Iustin Pop
4418 a8083063 Iustin Pop
      disks = [self._ComputeDiskStatus(instance, None, device)
4419 a8083063 Iustin Pop
               for device in instance.disks]
4420 a8083063 Iustin Pop
4421 a8083063 Iustin Pop
      idict = {
4422 a8083063 Iustin Pop
        "name": instance.name,
4423 a8083063 Iustin Pop
        "config_state": config_state,
4424 a8083063 Iustin Pop
        "run_state": remote_state,
4425 a8083063 Iustin Pop
        "pnode": instance.primary_node,
4426 a8083063 Iustin Pop
        "snodes": instance.secondary_nodes,
4427 a8083063 Iustin Pop
        "os": instance.os,
4428 a8083063 Iustin Pop
        "memory": instance.memory,
4429 a8083063 Iustin Pop
        "nics": [(nic.mac, nic.ip, nic.bridge) for nic in instance.nics],
4430 a8083063 Iustin Pop
        "disks": disks,
4431 f55ff7ec Iustin Pop
        "vcpus": instance.vcpus,
4432 e69d05fd Iustin Pop
        "hypervisor": instance.hypervisor,
4433 a8083063 Iustin Pop
        }
4434 a8083063 Iustin Pop
4435 e69d05fd Iustin Pop
      htkind = instance.hypervisor
4436 00cd937c Iustin Pop
      if htkind == constants.HT_XEN_PVM:
4437 a8340917 Iustin Pop
        idict["kernel_path"] = instance.kernel_path
4438 a8340917 Iustin Pop
        idict["initrd_path"] = instance.initrd_path
4439 a8340917 Iustin Pop
4440 00cd937c Iustin Pop
      if htkind == constants.HT_XEN_HVM:
4441 a8340917 Iustin Pop
        idict["hvm_boot_order"] = instance.hvm_boot_order
4442 a8340917 Iustin Pop
        idict["hvm_acpi"] = instance.hvm_acpi
4443 a8340917 Iustin Pop
        idict["hvm_pae"] = instance.hvm_pae
4444 a8340917 Iustin Pop
        idict["hvm_cdrom_image_path"] = instance.hvm_cdrom_image_path
4445 5397e0b7 Alexander Schreiber
        idict["hvm_nic_type"] = instance.hvm_nic_type
4446 5397e0b7 Alexander Schreiber
        idict["hvm_disk_type"] = instance.hvm_disk_type
4447 a8340917 Iustin Pop
4448 a8340917 Iustin Pop
      if htkind in constants.HTS_REQ_PORT:
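        # present the VNC endpoint in a readable form: a global bind address
        # is shown as primary_node:port, a localhost bind as address:port on
        # the node, any other address as bind_address:port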
4449 d0c11cf7 Alexander Schreiber
        if instance.vnc_bind_address is None:
4450 d0c11cf7 Alexander Schreiber
          vnc_bind_address = constants.VNC_DEFAULT_BIND_ADDRESS
4451 d0c11cf7 Alexander Schreiber
        else:
4452 d0c11cf7 Alexander Schreiber
          vnc_bind_address = instance.vnc_bind_address
4453 34b6ab97 Alexander Schreiber
        if instance.network_port is None:
4454 34b6ab97 Alexander Schreiber
          vnc_console_port = None
4455 d0c11cf7 Alexander Schreiber
        elif vnc_bind_address == constants.BIND_ADDRESS_GLOBAL:
4456 a4273aba Alexander Schreiber
          vnc_console_port = "%s:%s" % (instance.primary_node,
4457 34b6ab97 Alexander Schreiber
                                       instance.network_port)
4458 d0c11cf7 Alexander Schreiber
        elif vnc_bind_address == constants.LOCALHOST_IP_ADDRESS:
4459 d0c11cf7 Alexander Schreiber
          vnc_console_port = "%s:%s on node %s" % (vnc_bind_address,
4460 a4273aba Alexander Schreiber
                                                   instance.network_port,
4461 a4273aba Alexander Schreiber
                                                   instance.primary_node)
4462 34b6ab97 Alexander Schreiber
        else:
4463 34b6ab97 Alexander Schreiber
          vnc_console_port = "%s:%s" % (instance.vnc_bind_address,
4464 34b6ab97 Alexander Schreiber
                                        instance.network_port)
4465 34b6ab97 Alexander Schreiber
        idict["vnc_console_port"] = vnc_console_port
4466 d0c11cf7 Alexander Schreiber
        idict["vnc_bind_address"] = vnc_bind_address
4467 a8340917 Iustin Pop
        idict["network_port"] = instance.network_port
4468 a8340917 Iustin Pop
4469 a8083063 Iustin Pop
      result[instance.name] = idict
4470 a8083063 Iustin Pop
4471 a8083063 Iustin Pop
    return result
4472 a8083063 Iustin Pop
4473 a8083063 Iustin Pop
4474 7767bbf5 Manuel Franceschini
class LUSetInstanceParams(LogicalUnit):
4475 a8083063 Iustin Pop
  """Modifies an instances's parameters.
4476 a8083063 Iustin Pop

4477 a8083063 Iustin Pop
  """
4478 a8083063 Iustin Pop
  HPATH = "instance-modify"
4479 a8083063 Iustin Pop
  HTYPE = constants.HTYPE_INSTANCE
4480 a8083063 Iustin Pop
  _OP_REQP = ["instance_name"]
4481 1a5c7281 Guido Trotter
  REQ_BGL = False
4482 1a5c7281 Guido Trotter
4483 1a5c7281 Guido Trotter
  def ExpandNames(self):
4484 1a5c7281 Guido Trotter
    self._ExpandAndLockInstance()
4485 a8083063 Iustin Pop
4486 a8083063 Iustin Pop
  def BuildHooksEnv(self):
4487 a8083063 Iustin Pop
    """Build hooks env.
4488 a8083063 Iustin Pop

4489 a8083063 Iustin Pop
    This runs on the master, primary and secondaries.
4490 a8083063 Iustin Pop

4491 a8083063 Iustin Pop
    """
4492 396e1b78 Michael Hanselmann
    args = dict()
4493 a8083063 Iustin Pop
    if self.mem:
4494 396e1b78 Michael Hanselmann
      args['memory'] = self.mem
4495 a8083063 Iustin Pop
    if self.vcpus:
4496 396e1b78 Michael Hanselmann
      args['vcpus'] = self.vcpus
4497 ef756965 Iustin Pop
    if self.do_ip or self.do_bridge or self.mac:
4498 396e1b78 Michael Hanselmann
      if self.do_ip:
4499 396e1b78 Michael Hanselmann
        ip = self.ip
4500 396e1b78 Michael Hanselmann
      else:
4501 396e1b78 Michael Hanselmann
        ip = self.instance.nics[0].ip
4502 396e1b78 Michael Hanselmann
      if self.bridge:
4503 396e1b78 Michael Hanselmann
        bridge = self.bridge
4504 396e1b78 Michael Hanselmann
      else:
4505 396e1b78 Michael Hanselmann
        bridge = self.instance.nics[0].bridge
4506 ef756965 Iustin Pop
      if self.mac:
4507 ef756965 Iustin Pop
        mac = self.mac
4508 ef756965 Iustin Pop
      else:
4509 ef756965 Iustin Pop
        mac = self.instance.nics[0].mac
4510 ef756965 Iustin Pop
      args['nics'] = [(ip, bridge, mac)]
4511 396e1b78 Michael Hanselmann
    env = _BuildInstanceHookEnvByObject(self.instance, override=args)
4512 d6a02168 Michael Hanselmann
    nl = [self.cfg.GetMasterNode(),
4513 a8083063 Iustin Pop
          self.instance.primary_node] + list(self.instance.secondary_nodes)
4514 a8083063 Iustin Pop
    return env, nl, nl
4515 a8083063 Iustin Pop
4516 a8083063 Iustin Pop
  def CheckPrereq(self):
4517 a8083063 Iustin Pop
    """Check prerequisites.
4518 a8083063 Iustin Pop

4519 a8083063 Iustin Pop
    This only checks the instance list against the existing names.
4520 a8083063 Iustin Pop

4521 a8083063 Iustin Pop
    """
4522 1a5c7281 Guido Trotter
    # FIXME: all the parameters could be checked before, in ExpandNames, or in
4523 1a5c7281 Guido Trotter
    # a separate CheckArguments function, if we implement one, so the operation
4524 1a5c7281 Guido Trotter
    # can be aborted without waiting for any lock, should it have an error...
4525 a8083063 Iustin Pop
    self.mem = getattr(self.op, "mem", None)
4526 a8083063 Iustin Pop
    self.vcpus = getattr(self.op, "vcpus", None)
4527 a8083063 Iustin Pop
    self.ip = getattr(self.op, "ip", None)
4528 1862d460 Alexander Schreiber
    self.mac = getattr(self.op, "mac", None)
4529 a8083063 Iustin Pop
    self.bridge = getattr(self.op, "bridge", None)
4530 973d7867 Iustin Pop
    self.kernel_path = getattr(self.op, "kernel_path", None)
4531 973d7867 Iustin Pop
    self.initrd_path = getattr(self.op, "initrd_path", None)
4532 25c5878d Alexander Schreiber
    self.hvm_boot_order = getattr(self.op, "hvm_boot_order", None)
4533 31a853d2 Iustin Pop
    self.hvm_acpi = getattr(self.op, "hvm_acpi", None)
4534 31a853d2 Iustin Pop
    self.hvm_pae = getattr(self.op, "hvm_pae", None)
4535 5397e0b7 Alexander Schreiber
    self.hvm_nic_type = getattr(self.op, "hvm_nic_type", None)
4536 5397e0b7 Alexander Schreiber
    self.hvm_disk_type = getattr(self.op, "hvm_disk_type", None)
4537 31a853d2 Iustin Pop
    self.hvm_cdrom_image_path = getattr(self.op, "hvm_cdrom_image_path", None)
4538 31a853d2 Iustin Pop
    self.vnc_bind_address = getattr(self.op, "vnc_bind_address", None)
4539 4300c4b6 Guido Trotter
    self.force = getattr(self.op, "force", None)
4540 31a853d2 Iustin Pop
    all_parms = [self.mem, self.vcpus, self.ip, self.bridge, self.mac,
4541 31a853d2 Iustin Pop
                 self.kernel_path, self.initrd_path, self.hvm_boot_order,
4542 31a853d2 Iustin Pop
                 self.hvm_acpi, self.hvm_pae, self.hvm_cdrom_image_path,
4543 5397e0b7 Alexander Schreiber
                 self.vnc_bind_address, self.hvm_nic_type, self.hvm_disk_type]
4544 31a853d2 Iustin Pop
    if all_parms.count(None) == len(all_parms):
4545 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("No changes submitted")
4546 a8083063 Iustin Pop
    if self.mem is not None:
4547 a8083063 Iustin Pop
      try:
4548 a8083063 Iustin Pop
        self.mem = int(self.mem)
4549 a8083063 Iustin Pop
      except ValueError, err:
4550 3ecf6786 Iustin Pop
        raise errors.OpPrereqError("Invalid memory size: %s" % str(err))
4551 a8083063 Iustin Pop
    if self.vcpus is not None:
4552 a8083063 Iustin Pop
      try:
4553 a8083063 Iustin Pop
        self.vcpus = int(self.vcpus)
4554 a8083063 Iustin Pop
      except ValueError, err:
4555 3ecf6786 Iustin Pop
        raise errors.OpPrereqError("Invalid vcpus number: %s" % str(err))
4556 a8083063 Iustin Pop
    if self.ip is not None:
4557 a8083063 Iustin Pop
      self.do_ip = True
4558 a8083063 Iustin Pop
      if self.ip.lower() == "none":
4559 a8083063 Iustin Pop
        self.ip = None
4560 a8083063 Iustin Pop
      else:
4561 a8083063 Iustin Pop
        if not utils.IsValidIP(self.ip):
4562 3ecf6786 Iustin Pop
          raise errors.OpPrereqError("Invalid IP address '%s'." % self.ip)
4563 a8083063 Iustin Pop
    else:
4564 a8083063 Iustin Pop
      self.do_ip = False
4565 ecb215b5 Michael Hanselmann
    self.do_bridge = (self.bridge is not None)
4566 1862d460 Alexander Schreiber
    if self.mac is not None:
4567 1862d460 Alexander Schreiber
      if self.cfg.IsMacInUse(self.mac):
4568 1862d460 Alexander Schreiber
        raise errors.OpPrereqError('MAC address %s already in use in cluster' %
4569 1862d460 Alexander Schreiber
                                   self.mac)
4570 1862d460 Alexander Schreiber
      if not utils.IsValidMac(self.mac):
4571 1862d460 Alexander Schreiber
        raise errors.OpPrereqError('Invalid MAC address %s' % self.mac)
4572 a8083063 Iustin Pop
4573 973d7867 Iustin Pop
    if self.kernel_path is not None:
4574 973d7867 Iustin Pop
      self.do_kernel_path = True
4575 973d7867 Iustin Pop
      if self.kernel_path == constants.VALUE_NONE:
4576 973d7867 Iustin Pop
        raise errors.OpPrereqError("Can't set instance to no kernel")
4577 973d7867 Iustin Pop
4578 973d7867 Iustin Pop
      if self.kernel_path != constants.VALUE_DEFAULT:
4579 973d7867 Iustin Pop
        if not os.path.isabs(self.kernel_path):
4580 ba4b62cf Iustin Pop
          raise errors.OpPrereqError("The kernel path must be an absolute"
4581 973d7867 Iustin Pop
                                    " filename")
4582 8cafeb26 Iustin Pop
    else:
4583 8cafeb26 Iustin Pop
      self.do_kernel_path = False
4584 973d7867 Iustin Pop
4585 973d7867 Iustin Pop
    if self.initrd_path is not None:
4586 973d7867 Iustin Pop
      self.do_initrd_path = True
4587 973d7867 Iustin Pop
      if self.initrd_path not in (constants.VALUE_NONE,
4588 973d7867 Iustin Pop
                                  constants.VALUE_DEFAULT):
4589 2bc22872 Iustin Pop
        if not os.path.isabs(self.initrd_path):
4590 ba4b62cf Iustin Pop
          raise errors.OpPrereqError("The initrd path must be an absolute"
4591 973d7867 Iustin Pop
                                    " filename")
4592 8cafeb26 Iustin Pop
    else:
4593 8cafeb26 Iustin Pop
      self.do_initrd_path = False
4594 973d7867 Iustin Pop
4595 25c5878d Alexander Schreiber
    # boot order verification
4596 25c5878d Alexander Schreiber
    if self.hvm_boot_order is not None:
4597 25c5878d Alexander Schreiber
      if self.hvm_boot_order != constants.VALUE_DEFAULT:
4598 25c5878d Alexander Schreiber
        if len(self.hvm_boot_order.strip("acdn")) != 0:
4599 25c5878d Alexander Schreiber
          raise errors.OpPrereqError("invalid boot order specified,"
4600 25c5878d Alexander Schreiber
                                     " must be one or more of [acdn]"
4601 25c5878d Alexander Schreiber
                                     " or 'default'")
4602 25c5878d Alexander Schreiber
4603 31a853d2 Iustin Pop
    # hvm_cdrom_image_path verification
4604 31a853d2 Iustin Pop
    if self.op.hvm_cdrom_image_path is not None:
4605 3fc175f0 Alexander Schreiber
      if not (os.path.isabs(self.op.hvm_cdrom_image_path) or
4606 3fc175f0 Alexander Schreiber
              self.op.hvm_cdrom_image_path.lower() == "none"):
4607 31a853d2 Iustin Pop
        raise errors.OpPrereqError("The path to the HVM CDROM image must"
4608 31a853d2 Iustin Pop
                                   " be an absolute path or None, not %s" %
4609 31a853d2 Iustin Pop
                                   self.op.hvm_cdrom_image_path)
4610 3fc175f0 Alexander Schreiber
      if not (os.path.isfile(self.op.hvm_cdrom_image_path) or
4611 3fc175f0 Alexander Schreiber
              self.op.hvm_cdrom_image_path.lower() == "none"):
4612 31a853d2 Iustin Pop
        raise errors.OpPrereqError("The HVM CDROM image must either be a"
4613 31a853d2 Iustin Pop
                                   " regular file or a symlink pointing to"
4614 31a853d2 Iustin Pop
                                   " an existing regular file, not %s" %
4615 31a853d2 Iustin Pop
                                   self.op.hvm_cdrom_image_path)
4616 31a853d2 Iustin Pop
4617 31a853d2 Iustin Pop
    # vnc_bind_address verification
4618 31a853d2 Iustin Pop
    if self.op.vnc_bind_address is not None:
4619 31a853d2 Iustin Pop
      if not utils.IsValidIP(self.op.vnc_bind_address):
4620 31a853d2 Iustin Pop
        raise errors.OpPrereqError("given VNC bind address '%s' doesn't look"
4621 31a853d2 Iustin Pop
                                   " like a valid IP address" %
4622 31a853d2 Iustin Pop
                                   self.op.vnc_bind_address)
4623 31a853d2 Iustin Pop
4624 cfefe007 Guido Trotter
    instance = self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
4625 1a5c7281 Guido Trotter
    assert self.instance is not None, \
4626 1a5c7281 Guido Trotter
      "Cannot retrieve locked instance %s" % self.op.instance_name
4627 cfefe007 Guido Trotter
    self.warn = []
4628 cfefe007 Guido Trotter
    if self.mem is not None and not self.force:
4629 cfefe007 Guido Trotter
      pnode = self.instance.primary_node
4630 cfefe007 Guido Trotter
      nodelist = [pnode]
4631 cfefe007 Guido Trotter
      nodelist.extend(instance.secondary_nodes)
4632 e69d05fd Iustin Pop
      instance_info = rpc.call_instance_info(pnode, instance.name,
4633 e69d05fd Iustin Pop
                                             instance.hypervisor)
4634 e69d05fd Iustin Pop
      nodeinfo = rpc.call_node_info(nodelist, self.cfg.GetVGName(),
4635 e69d05fd Iustin Pop
                                    instance.hypervisor)
4636 cfefe007 Guido Trotter
4637 cfefe007 Guido Trotter
      if pnode not in nodeinfo or not isinstance(nodeinfo[pnode], dict):
4638 cfefe007 Guido Trotter
        # Assume the primary node is unreachable and go ahead
4639 cfefe007 Guido Trotter
        self.warn.append("Can't get info from primary node %s" % pnode)
4640 cfefe007 Guido Trotter
      else:
4641 cfefe007 Guido Trotter
        if instance_info:
4642 cfefe007 Guido Trotter
          current_mem = instance_info['memory']
4643 cfefe007 Guido Trotter
        else:
4644 cfefe007 Guido Trotter
          # Assume instance not running
4645 cfefe007 Guido Trotter
          # (there is a slight race condition here, but it's not very probable,
4646 cfefe007 Guido Trotter
          # and we have no other way to check)
4647 cfefe007 Guido Trotter
          current_mem = 0
4648 cfefe007 Guido Trotter
        miss_mem = self.mem - current_mem - nodeinfo[pnode]['memory_free']
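        # Worked example (hypothetical numbers): raising the instance to
        # 2048 MB while it currently uses 1024 MB on a node with 512 MB free
        # gives miss_mem = 2048 - 1024 - 512 = 512 MB still missing.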
4649 cfefe007 Guido Trotter
        if miss_mem > 0:
4650 cfefe007 Guido Trotter
          raise errors.OpPrereqError("This change will prevent the instance"
4651 cfefe007 Guido Trotter
                                     " from starting, due to %d MB of memory"
4652 cfefe007 Guido Trotter
                                     " missing on its primary node" % miss_mem)
4653 cfefe007 Guido Trotter
4654 cfefe007 Guido Trotter
      for node in instance.secondary_nodes:
4655 cfefe007 Guido Trotter
        if node not in nodeinfo or not isinstance(nodeinfo[node], dict):
4656 cfefe007 Guido Trotter
          self.warn.append("Can't get info from secondary node %s" % node)
4657 cfefe007 Guido Trotter
        elif self.mem > nodeinfo[node]['memory_free']:
4658 cfefe007 Guido Trotter
          self.warn.append("Not enough memory to failover instance to secondary"
4659 cfefe007 Guido Trotter
                           " node %s" % node)
4660 cfefe007 Guido Trotter
4661 5bc84f33 Alexander Schreiber
    # Xen HVM device type checks
4662 00cd937c Iustin Pop
    if instance.hypervisor == constants.HT_XEN_HVM:
4663 5bc84f33 Alexander Schreiber
      if self.op.hvm_nic_type is not None:
4664 5bc84f33 Alexander Schreiber
        if self.op.hvm_nic_type not in constants.HT_HVM_VALID_NIC_TYPES:
4665 5bc84f33 Alexander Schreiber
          raise errors.OpPrereqError("Invalid NIC type %s specified for Xen"
4666 5bc84f33 Alexander Schreiber
                                     " HVM  hypervisor" % self.op.hvm_nic_type)
4667 5bc84f33 Alexander Schreiber
      if self.op.hvm_disk_type is not None:
4668 5bc84f33 Alexander Schreiber
        if self.op.hvm_disk_type not in constants.HT_HVM_VALID_DISK_TYPES:
4669 5bc84f33 Alexander Schreiber
          raise errors.OpPrereqError("Invalid disk type %s specified for Xen"
4670 5bc84f33 Alexander Schreiber
                                     " HVM hypervisor" % self.op.hvm_disk_type)
4671 5bc84f33 Alexander Schreiber
4672 a8083063 Iustin Pop
    return
4673 a8083063 Iustin Pop
4674 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
4675 a8083063 Iustin Pop
    """Modifies an instance.
4676 a8083063 Iustin Pop

4677 a8083063 Iustin Pop
    All parameters take effect only at the next restart of the instance.
4678 a8083063 Iustin Pop
    """
4679 cfefe007 Guido Trotter
    # Process here the warnings from CheckPrereq, as we don't have a
4680 cfefe007 Guido Trotter
    # feedback_fn there.
4681 cfefe007 Guido Trotter
    for warn in self.warn:
4682 cfefe007 Guido Trotter
      feedback_fn("WARNING: %s" % warn)
4683 cfefe007 Guido Trotter
4684 a8083063 Iustin Pop
    result = []
4685 a8083063 Iustin Pop
    instance = self.instance
4686 a8083063 Iustin Pop
    if self.mem:
4687 a8083063 Iustin Pop
      instance.memory = self.mem
4688 a8083063 Iustin Pop
      result.append(("mem", self.mem))
4689 a8083063 Iustin Pop
    if self.vcpus:
4690 a8083063 Iustin Pop
      instance.vcpus = self.vcpus
4691 a8083063 Iustin Pop
      result.append(("vcpus",  self.vcpus))
4692 a8083063 Iustin Pop
    if self.do_ip:
4693 a8083063 Iustin Pop
      instance.nics[0].ip = self.ip
4694 a8083063 Iustin Pop
      result.append(("ip", self.ip))
4695 a8083063 Iustin Pop
    if self.bridge:
4696 a8083063 Iustin Pop
      instance.nics[0].bridge = self.bridge
4697 a8083063 Iustin Pop
      result.append(("bridge", self.bridge))
4698 1862d460 Alexander Schreiber
    if self.mac:
4699 1862d460 Alexander Schreiber
      instance.nics[0].mac = self.mac
4700 1862d460 Alexander Schreiber
      result.append(("mac", self.mac))
4701 973d7867 Iustin Pop
    if self.do_kernel_path:
4702 973d7867 Iustin Pop
      instance.kernel_path = self.kernel_path
4703 973d7867 Iustin Pop
      result.append(("kernel_path", self.kernel_path))
4704 973d7867 Iustin Pop
    if self.do_initrd_path:
4705 973d7867 Iustin Pop
      instance.initrd_path = self.initrd_path
4706 973d7867 Iustin Pop
      result.append(("initrd_path", self.initrd_path))
4707 25c5878d Alexander Schreiber
    if self.hvm_boot_order:
4708 25c5878d Alexander Schreiber
      if self.hvm_boot_order == constants.VALUE_DEFAULT:
4709 25c5878d Alexander Schreiber
        instance.hvm_boot_order = None
4710 25c5878d Alexander Schreiber
      else:
4711 25c5878d Alexander Schreiber
        instance.hvm_boot_order = self.hvm_boot_order
4712 25c5878d Alexander Schreiber
      result.append(("hvm_boot_order", self.hvm_boot_order))
4713 3fc175f0 Alexander Schreiber
    if self.hvm_acpi is not None:
4714 ec1ba002 Iustin Pop
      instance.hvm_acpi = self.hvm_acpi
4715 31a853d2 Iustin Pop
      result.append(("hvm_acpi", self.hvm_acpi))
4716 3fc175f0 Alexander Schreiber
    if self.hvm_pae is not None:
4717 ec1ba002 Iustin Pop
      instance.hvm_pae = self.hvm_pae
4718 31a853d2 Iustin Pop
      result.append(("hvm_pae", self.hvm_pae))
4719 5397e0b7 Alexander Schreiber
    if self.hvm_nic_type is not None:
4720 5397e0b7 Alexander Schreiber
      instance.hvm_nic_type = self.hvm_nic_type
4721 5397e0b7 Alexander Schreiber
      result.append(("hvm_nic_type", self.hvm_nic_type))
4722 5397e0b7 Alexander Schreiber
    if self.hvm_disk_type is not None:
4723 5397e0b7 Alexander Schreiber
      instance.hvm_disk_type = self.hvm_disk_type
4724 5397e0b7 Alexander Schreiber
      result.append(("hvm_disk_type", self.hvm_disk_type))
4725 31a853d2 Iustin Pop
    if self.hvm_cdrom_image_path:
4726 3fc175f0 Alexander Schreiber
      if self.hvm_cdrom_image_path == constants.VALUE_NONE:
4727 3fc175f0 Alexander Schreiber
        instance.hvm_cdrom_image_path = None
4728 3fc175f0 Alexander Schreiber
      else:
4729 3fc175f0 Alexander Schreiber
        instance.hvm_cdrom_image_path = self.hvm_cdrom_image_path
4730 31a853d2 Iustin Pop
      result.append(("hvm_cdrom_image_path", self.hvm_cdrom_image_path))
4731 31a853d2 Iustin Pop
    if self.vnc_bind_address:
4732 31a853d2 Iustin Pop
      instance.vnc_bind_address = self.vnc_bind_address
4733 31a853d2 Iustin Pop
      result.append(("vnc_bind_address", self.vnc_bind_address))
4734 a8083063 Iustin Pop
4735 ea94e1cd Guido Trotter
    self.cfg.Update(instance)
4736 a8083063 Iustin Pop
4737 a8083063 Iustin Pop
    return result
4738 a8083063 Iustin Pop
4739 a8083063 Iustin Pop
4740 a8083063 Iustin Pop
class LUQueryExports(NoHooksLU):
4741 a8083063 Iustin Pop
  """Query the exports list
4742 a8083063 Iustin Pop

4743 a8083063 Iustin Pop
  """
4744 895ecd9c Guido Trotter
  _OP_REQP = ['nodes']
4745 21a15682 Guido Trotter
  REQ_BGL = False
4746 21a15682 Guido Trotter
4747 21a15682 Guido Trotter
  def ExpandNames(self):
4748 21a15682 Guido Trotter
    self.needed_locks = {}
4749 21a15682 Guido Trotter
    self.share_locks[locking.LEVEL_NODE] = 1
4750 21a15682 Guido Trotter
    if not self.op.nodes:
4751 e310b019 Guido Trotter
      self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
4752 21a15682 Guido Trotter
    else:
4753 21a15682 Guido Trotter
      self.needed_locks[locking.LEVEL_NODE] = \
4754 21a15682 Guido Trotter
        _GetWantedNodes(self, self.op.nodes)
4755 a8083063 Iustin Pop
4756 a8083063 Iustin Pop
  def CheckPrereq(self):
4757 21a15682 Guido Trotter
    """Check prerequisites.
4758 a8083063 Iustin Pop

4759 a8083063 Iustin Pop
    """
4760 21a15682 Guido Trotter
    self.nodes = self.acquired_locks[locking.LEVEL_NODE]
4761 a8083063 Iustin Pop
4762 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
4763 a8083063 Iustin Pop
    """Compute the list of all the exported system images.
4764 a8083063 Iustin Pop

4765 a8083063 Iustin Pop
    Returns:
4766 a8083063 Iustin Pop
      a dictionary with the structure node->(export-list)
4767 a8083063 Iustin Pop
      where export-list is a list of the instances exported on
4768 a8083063 Iustin Pop
      that node.
4769 a8083063 Iustin Pop

4770 a8083063 Iustin Pop
    """
4771 a7ba5e53 Iustin Pop
    return rpc.call_export_list(self.nodes)
4772 a8083063 Iustin Pop
4773 a8083063 Iustin Pop
4774 a8083063 Iustin Pop
class LUExportInstance(LogicalUnit):
4775 a8083063 Iustin Pop
  """Export an instance to an image in the cluster.
4776 a8083063 Iustin Pop

4777 a8083063 Iustin Pop
  """
4778 a8083063 Iustin Pop
  HPATH = "instance-export"
4779 a8083063 Iustin Pop
  HTYPE = constants.HTYPE_INSTANCE
4780 a8083063 Iustin Pop
  _OP_REQP = ["instance_name", "target_node", "shutdown"]
4781 6657590e Guido Trotter
  REQ_BGL = False
4782 6657590e Guido Trotter
4783 6657590e Guido Trotter
  def ExpandNames(self):
4784 6657590e Guido Trotter
    self._ExpandAndLockInstance()
4785 6657590e Guido Trotter
    # FIXME: lock only instance primary and destination node
4786 6657590e Guido Trotter
    #
4787 6657590e Guido Trotter
    # Sad but true, for now we have to lock all nodes, as we don't know where
4788 6657590e Guido Trotter
    # the previous export might be, and in this LU we search for it and
4789 6657590e Guido Trotter
    # remove it from its current node. In the future we could fix this by:
4790 6657590e Guido Trotter
    #  - making a tasklet to search (share-lock all), then create the new one,
4791 6657590e Guido Trotter
    #    then one to remove, after
4792 6657590e Guido Trotter
    #  - removing the removal operation altogether
4793 6657590e Guido Trotter
    self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
4794 6657590e Guido Trotter
4795 6657590e Guido Trotter
  def DeclareLocks(self, level):
4796 6657590e Guido Trotter
    """Last minute lock declaration."""
4797 6657590e Guido Trotter
    # All nodes are locked anyway, so nothing to do here.
4798 a8083063 Iustin Pop
4799 a8083063 Iustin Pop
  def BuildHooksEnv(self):
4800 a8083063 Iustin Pop
    """Build hooks env.
4801 a8083063 Iustin Pop

4802 a8083063 Iustin Pop
    This will run on the master, primary node and target node.
4803 a8083063 Iustin Pop

4804 a8083063 Iustin Pop
    """
4805 a8083063 Iustin Pop
    env = {
4806 a8083063 Iustin Pop
      "EXPORT_NODE": self.op.target_node,
4807 a8083063 Iustin Pop
      "EXPORT_DO_SHUTDOWN": self.op.shutdown,
4808 a8083063 Iustin Pop
      }
4809 396e1b78 Michael Hanselmann
    env.update(_BuildInstanceHookEnvByObject(self.instance))
4810 d6a02168 Michael Hanselmann
    nl = [self.cfg.GetMasterNode(), self.instance.primary_node,
4811 a8083063 Iustin Pop
          self.op.target_node]
4812 a8083063 Iustin Pop
    return env, nl, nl
4813 a8083063 Iustin Pop
4814 a8083063 Iustin Pop
  def CheckPrereq(self):
4815 a8083063 Iustin Pop
    """Check prerequisites.
4816 a8083063 Iustin Pop

4817 9ac99fda Guido Trotter
    This checks that the instance and node names are valid.
4818 a8083063 Iustin Pop

4819 a8083063 Iustin Pop
    """
4820 6657590e Guido Trotter
    instance_name = self.op.instance_name
4821 a8083063 Iustin Pop
    self.instance = self.cfg.GetInstanceInfo(instance_name)
4822 6657590e Guido Trotter
    assert self.instance is not None, \
4823 6657590e Guido Trotter
          "Cannot retrieve locked instance %s" % self.op.instance_name
4824 a8083063 Iustin Pop
4825 6657590e Guido Trotter
    self.dst_node = self.cfg.GetNodeInfo(
4826 6657590e Guido Trotter
      self.cfg.ExpandNodeName(self.op.target_node))
4827 a8083063 Iustin Pop
4828 6657590e Guido Trotter
    assert self.dst_node is not None, \
4829 6657590e Guido Trotter
          "Cannot retrieve locked node %s" % self.op.target_node
4830 a8083063 Iustin Pop
4831 b6023d6c Manuel Franceschini
    # instance disk type verification
4832 b6023d6c Manuel Franceschini
    for disk in self.instance.disks:
4833 b6023d6c Manuel Franceschini
      if disk.dev_type == constants.LD_FILE:
4834 b6023d6c Manuel Franceschini
        raise errors.OpPrereqError("Export not supported for instances with"
4835 b6023d6c Manuel Franceschini
                                   " file-based disks")
4836 b6023d6c Manuel Franceschini
4837 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
4838 a8083063 Iustin Pop
    """Export an instance to an image in the cluster.
4839 a8083063 Iustin Pop

4840 a8083063 Iustin Pop
    """
4841 a8083063 Iustin Pop
    instance = self.instance
4842 a8083063 Iustin Pop
    dst_node = self.dst_node
4843 a8083063 Iustin Pop
    src_node = instance.primary_node
4844 a8083063 Iustin Pop
    if self.op.shutdown:
4845 fb300fb7 Guido Trotter
      # shut down the instance, but not the disks
4846 fb300fb7 Guido Trotter
      if not rpc.call_instance_shutdown(src_node, instance):
4847 38206f3c Iustin Pop
        raise errors.OpExecError("Could not shutdown instance %s on node %s" %
4848 38206f3c Iustin Pop
                                 (instance.name, src_node))
4849 a8083063 Iustin Pop
4850 a8083063 Iustin Pop
    vgname = self.cfg.GetVGName()
4851 a8083063 Iustin Pop
4852 a8083063 Iustin Pop
    snap_disks = []
4853 a8083063 Iustin Pop
4854 a8083063 Iustin Pop
    try:
4855 a8083063 Iustin Pop
      for disk in instance.disks:
4856 a8083063 Iustin Pop
        if disk.iv_name == "sda":
4857 a8083063 Iustin Pop
          # new_dev_name will be a snapshot of an lvm leaf of the one we passed
4858 a8083063 Iustin Pop
          new_dev_name = rpc.call_blockdev_snapshot(src_node, disk)
4859 a8083063 Iustin Pop
4860 a8083063 Iustin Pop
          if not new_dev_name:
4861 a8083063 Iustin Pop
            logger.Error("could not snapshot block device %s on node %s" %
4862 a8083063 Iustin Pop
                         (disk.logical_id[1], src_node))
4863 a8083063 Iustin Pop
          else:
4864 fe96220b Iustin Pop
            new_dev = objects.Disk(dev_type=constants.LD_LV, size=disk.size,
4865 a8083063 Iustin Pop
                                      logical_id=(vgname, new_dev_name),
4866 a8083063 Iustin Pop
                                      physical_id=(vgname, new_dev_name),
4867 a8083063 Iustin Pop
                                      iv_name=disk.iv_name)
4868 a8083063 Iustin Pop
            snap_disks.append(new_dev)
4869 a8083063 Iustin Pop
4870 a8083063 Iustin Pop
    finally:
4871 fb300fb7 Guido Trotter
      if self.op.shutdown and instance.status == "up":
4872 fb300fb7 Guido Trotter
        if not rpc.call_instance_start(src_node, instance, None):
4873 fb300fb7 Guido Trotter
          _ShutdownInstanceDisks(instance, self.cfg)
4874 fb300fb7 Guido Trotter
          raise errors.OpExecError("Could not start instance")
4875 a8083063 Iustin Pop
4876 a8083063 Iustin Pop
    # TODO: check for size
4877 a8083063 Iustin Pop
4878 62c9ec92 Iustin Pop
    cluster_name = self.cfg.GetClusterName()
4879 a8083063 Iustin Pop
    for dev in snap_disks:
4880 62c9ec92 Iustin Pop
      if not rpc.call_snapshot_export(src_node, dev, dst_node.name,
4881 62c9ec92 Iustin Pop
                                      instance, cluster_name):
4882 16687b98 Manuel Franceschini
        logger.Error("could not export block device %s from node %s to node %s"
4883 16687b98 Manuel Franceschini
                     % (dev.logical_id[1], src_node, dst_node.name))
4884 a8083063 Iustin Pop
      if not rpc.call_blockdev_remove(src_node, dev):
4885 16687b98 Manuel Franceschini
        logger.Error("could not remove snapshot block device %s from node %s" %
4886 16687b98 Manuel Franceschini
                     (dev.logical_id[1], src_node))
4887 a8083063 Iustin Pop
4888 a8083063 Iustin Pop
    if not rpc.call_finalize_export(dst_node.name, instance, snap_disks):
4889 a8083063 Iustin Pop
      logger.Error("could not finalize export for instance %s on node %s" %
4890 a8083063 Iustin Pop
                   (instance.name, dst_node.name))
4891 a8083063 Iustin Pop
4892 a8083063 Iustin Pop
    nodelist = self.cfg.GetNodeList()
4893 a8083063 Iustin Pop
    nodelist.remove(dst_node.name)
4894 a8083063 Iustin Pop
4895 a8083063 Iustin Pop
    # on one-node clusters nodelist will be empty after the removal
4896 a8083063 Iustin Pop
    # if we proceed, the backup would be removed because OpQueryExports
4897 a8083063 Iustin Pop
    # substitutes an empty list with the full cluster node list.
4898 a8083063 Iustin Pop
    if nodelist:
4899 204f2086 Guido Trotter
      exportlist = rpc.call_export_list(nodelist)
4900 a8083063 Iustin Pop
      for node in exportlist:
4901 a8083063 Iustin Pop
        if instance.name in exportlist[node]:
4902 a8083063 Iustin Pop
          if not rpc.call_export_remove(node, instance.name):
4903 a8083063 Iustin Pop
            logger.Error("could not remove older export for instance %s"
4904 a8083063 Iustin Pop
                         " on node %s" % (instance.name, node))
4905 5c947f38 Iustin Pop
4906 5c947f38 Iustin Pop
4907 9ac99fda Guido Trotter
class LURemoveExport(NoHooksLU):
4908 9ac99fda Guido Trotter
  """Remove exports related to the named instance.
4909 9ac99fda Guido Trotter

4910 9ac99fda Guido Trotter
  """
4911 9ac99fda Guido Trotter
  _OP_REQP = ["instance_name"]
4912 3656b3af Guido Trotter
  REQ_BGL = False
4913 3656b3af Guido Trotter
4914 3656b3af Guido Trotter
  def ExpandNames(self):
4915 3656b3af Guido Trotter
    self.needed_locks = {}
4916 3656b3af Guido Trotter
    # We need all nodes to be locked in order for RemoveExport to work, but we
4917 3656b3af Guido Trotter
    # don't need to lock the instance itself, as nothing will happen to it (and
4918 3656b3af Guido Trotter
    # we can remove exports also for a removed instance)
4919 3656b3af Guido Trotter
    self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
4920 9ac99fda Guido Trotter
4921 9ac99fda Guido Trotter
  def CheckPrereq(self):
4922 9ac99fda Guido Trotter
    """Check prerequisites.
4923 9ac99fda Guido Trotter
    """
4924 9ac99fda Guido Trotter
    pass
4925 9ac99fda Guido Trotter
4926 9ac99fda Guido Trotter
  def Exec(self, feedback_fn):
4927 9ac99fda Guido Trotter
    """Remove any export.
4928 9ac99fda Guido Trotter

4929 9ac99fda Guido Trotter
    """
4930 9ac99fda Guido Trotter
    instance_name = self.cfg.ExpandInstanceName(self.op.instance_name)
4931 9ac99fda Guido Trotter
    # If the instance was not found we'll try with the name that was passed in.
4932 9ac99fda Guido Trotter
    # This will only work if it was an FQDN, though.
4933 9ac99fda Guido Trotter
    fqdn_warn = False
4934 9ac99fda Guido Trotter
    if not instance_name:
4935 9ac99fda Guido Trotter
      fqdn_warn = True
4936 9ac99fda Guido Trotter
      instance_name = self.op.instance_name
4937 9ac99fda Guido Trotter
4938 3656b3af Guido Trotter
    exportlist = rpc.call_export_list(self.acquired_locks[locking.LEVEL_NODE])
4939 9ac99fda Guido Trotter
    found = False
4940 9ac99fda Guido Trotter
    for node in exportlist:
4941 9ac99fda Guido Trotter
      if instance_name in exportlist[node]:
4942 9ac99fda Guido Trotter
        found = True
4943 9ac99fda Guido Trotter
        if not rpc.call_export_remove(node, instance_name):
4944 9ac99fda Guido Trotter
          logger.Error("could not remove export for instance %s"
4945 9ac99fda Guido Trotter
                       " on node %s" % (instance_name, node))
4946 9ac99fda Guido Trotter
4947 9ac99fda Guido Trotter
    if fqdn_warn and not found:
4948 9ac99fda Guido Trotter
      feedback_fn("Export not found. If trying to remove an export belonging"
4949 9ac99fda Guido Trotter
                  " to a deleted instance please use its Fully Qualified"
4950 9ac99fda Guido Trotter
                  " Domain Name.")
4951 9ac99fda Guido Trotter
4952 9ac99fda Guido Trotter
4953 5c947f38 Iustin Pop
class TagsLU(NoHooksLU):
4954 5c947f38 Iustin Pop
  """Generic tags LU.
4955 5c947f38 Iustin Pop

4956 5c947f38 Iustin Pop
  This is an abstract class which is the parent of all the other tags LUs.
4957 5c947f38 Iustin Pop

4958 5c947f38 Iustin Pop
  """
4959 5c947f38 Iustin Pop
4960 8646adce Guido Trotter
  def ExpandNames(self):
4961 8646adce Guido Trotter
    self.needed_locks = {}
4962 8646adce Guido Trotter
    if self.op.kind == constants.TAG_NODE:
4963 5c947f38 Iustin Pop
      name = self.cfg.ExpandNodeName(self.op.name)
4964 5c947f38 Iustin Pop
      if name is None:
4965 3ecf6786 Iustin Pop
        raise errors.OpPrereqError("Invalid node name (%s)" %
4966 3ecf6786 Iustin Pop
                                   (self.op.name,))
4967 5c947f38 Iustin Pop
      self.op.name = name
4968 8646adce Guido Trotter
      self.needed_locks[locking.LEVEL_NODE] = name
4969 5c947f38 Iustin Pop
    elif self.op.kind == constants.TAG_INSTANCE:
4970 8f684e16 Iustin Pop
      name = self.cfg.ExpandInstanceName(self.op.name)
4971 5c947f38 Iustin Pop
      if name is None:
4972 3ecf6786 Iustin Pop
        raise errors.OpPrereqError("Invalid instance name (%s)" %
4973 3ecf6786 Iustin Pop
                                   (self.op.name,))
4974 5c947f38 Iustin Pop
      self.op.name = name
4975 8646adce Guido Trotter
      self.needed_locks[locking.LEVEL_INSTANCE] = name
4976 8646adce Guido Trotter
4977 8646adce Guido Trotter
  def CheckPrereq(self):
4978 8646adce Guido Trotter
    """Check prerequisites.
4979 8646adce Guido Trotter

4980 8646adce Guido Trotter
    """
4981 8646adce Guido Trotter
    if self.op.kind == constants.TAG_CLUSTER:
4982 8646adce Guido Trotter
      self.target = self.cfg.GetClusterInfo()
4983 8646adce Guido Trotter
    elif self.op.kind == constants.TAG_NODE:
4984 8646adce Guido Trotter
      self.target = self.cfg.GetNodeInfo(self.op.name)
4985 8646adce Guido Trotter
    elif self.op.kind == constants.TAG_INSTANCE:
4986 8646adce Guido Trotter
      self.target = self.cfg.GetInstanceInfo(self.op.name)
4987 5c947f38 Iustin Pop
    else:
4988 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("Wrong tag type requested (%s)" %
4989 3ecf6786 Iustin Pop
                                 str(self.op.kind))
4990 5c947f38 Iustin Pop
4991 5c947f38 Iustin Pop
4992 5c947f38 Iustin Pop
class LUGetTags(TagsLU):
4993 5c947f38 Iustin Pop
  """Returns the tags of a given object.
4994 5c947f38 Iustin Pop

4995 5c947f38 Iustin Pop
  """
4996 5c947f38 Iustin Pop
  _OP_REQP = ["kind", "name"]
4997 8646adce Guido Trotter
  REQ_BGL = False
4998 5c947f38 Iustin Pop
4999 5c947f38 Iustin Pop
  def Exec(self, feedback_fn):
5000 5c947f38 Iustin Pop
    """Returns the tag list.
5001 5c947f38 Iustin Pop

5002 5c947f38 Iustin Pop
    """
5003 5d414478 Oleksiy Mishchenko
    return list(self.target.GetTags())
5004 5c947f38 Iustin Pop
5005 5c947f38 Iustin Pop
5006 73415719 Iustin Pop
class LUSearchTags(NoHooksLU):
5007 73415719 Iustin Pop
  """Searches the tags for a given pattern.
5008 73415719 Iustin Pop

5009 73415719 Iustin Pop
  """
5010 73415719 Iustin Pop
  _OP_REQP = ["pattern"]
5011 8646adce Guido Trotter
  REQ_BGL = False
5012 8646adce Guido Trotter
5013 8646adce Guido Trotter
  def ExpandNames(self):
5014 8646adce Guido Trotter
    self.needed_locks = {}
5015 73415719 Iustin Pop
5016 73415719 Iustin Pop
  def CheckPrereq(self):
5017 73415719 Iustin Pop
    """Check prerequisites.
5018 73415719 Iustin Pop

5019 73415719 Iustin Pop
    This checks the pattern passed for validity by compiling it.
5020 73415719 Iustin Pop

5021 73415719 Iustin Pop
    """
5022 73415719 Iustin Pop
    try:
5023 73415719 Iustin Pop
      self.re = re.compile(self.op.pattern)
5024 73415719 Iustin Pop
    except re.error, err:
5025 73415719 Iustin Pop
      raise errors.OpPrereqError("Invalid search pattern '%s': %s" %
5026 73415719 Iustin Pop
                                 (self.op.pattern, err))
5027 73415719 Iustin Pop
5028 73415719 Iustin Pop
  def Exec(self, feedback_fn):
5029 73415719 Iustin Pop
    """Returns the tag list.
5030 73415719 Iustin Pop

5031 73415719 Iustin Pop
    """
5032 73415719 Iustin Pop
    cfg = self.cfg
5033 73415719 Iustin Pop
    tgts = [("/cluster", cfg.GetClusterInfo())]
5034 8646adce Guido Trotter
    ilist = cfg.GetAllInstancesInfo().values()
5035 73415719 Iustin Pop
    tgts.extend([("/instances/%s" % i.name, i) for i in ilist])
5036 8646adce Guido Trotter
    nlist = cfg.GetAllNodesInfo().values()
5037 73415719 Iustin Pop
    tgts.extend([("/nodes/%s" % n.name, n) for n in nlist])
5038 73415719 Iustin Pop
    results = []
5039 73415719 Iustin Pop
    for path, target in tgts:
5040 73415719 Iustin Pop
      for tag in target.GetTags():
5041 73415719 Iustin Pop
        if self.re.search(tag):
5042 73415719 Iustin Pop
          results.append((path, tag))
5043 73415719 Iustin Pop
    return results
5044 73415719 Iustin Pop
5045 73415719 Iustin Pop
5046 f27302fa Iustin Pop
class LUAddTags(TagsLU):
5047 5c947f38 Iustin Pop
  """Sets a tag on a given object.
5048 5c947f38 Iustin Pop

5049 5c947f38 Iustin Pop
  """
5050 f27302fa Iustin Pop
  _OP_REQP = ["kind", "name", "tags"]
5051 8646adce Guido Trotter
  REQ_BGL = False
5052 5c947f38 Iustin Pop
5053 5c947f38 Iustin Pop
  def CheckPrereq(self):
5054 5c947f38 Iustin Pop
    """Check prerequisites.
5055 5c947f38 Iustin Pop

5056 5c947f38 Iustin Pop
    This checks the type and length of the tag name and value.
5057 5c947f38 Iustin Pop

5058 5c947f38 Iustin Pop
    """
5059 5c947f38 Iustin Pop
    TagsLU.CheckPrereq(self)
5060 f27302fa Iustin Pop
    for tag in self.op.tags:
5061 f27302fa Iustin Pop
      objects.TaggableObject.ValidateTag(tag)
5062 5c947f38 Iustin Pop
5063 5c947f38 Iustin Pop
  def Exec(self, feedback_fn):
5064 5c947f38 Iustin Pop
    """Sets the tag.
5065 5c947f38 Iustin Pop

5066 5c947f38 Iustin Pop
    """
5067 5c947f38 Iustin Pop
    try:
5068 f27302fa Iustin Pop
      for tag in self.op.tags:
5069 f27302fa Iustin Pop
        self.target.AddTag(tag)
5070 5c947f38 Iustin Pop
    except errors.TagError, err:
5071 3ecf6786 Iustin Pop
      raise errors.OpExecError("Error while setting tag: %s" % str(err))
5072 5c947f38 Iustin Pop
    try:
5073 5c947f38 Iustin Pop
      self.cfg.Update(self.target)
5074 5c947f38 Iustin Pop
    except errors.ConfigurationError:
5075 3ecf6786 Iustin Pop
      raise errors.OpRetryError("There has been a modification to the"
5076 3ecf6786 Iustin Pop
                                " config file and the operation has been"
5077 3ecf6786 Iustin Pop
                                " aborted. Please retry.")
5078 5c947f38 Iustin Pop
5079 5c947f38 Iustin Pop
5080 f27302fa Iustin Pop
class LUDelTags(TagsLU):
5081 f27302fa Iustin Pop
  """Delete a list of tags from a given object.
5082 5c947f38 Iustin Pop

5083 5c947f38 Iustin Pop
  """
5084 f27302fa Iustin Pop
  _OP_REQP = ["kind", "name", "tags"]
5085 8646adce Guido Trotter
  REQ_BGL = False
5086 5c947f38 Iustin Pop
5087 5c947f38 Iustin Pop
  def CheckPrereq(self):
5088 5c947f38 Iustin Pop
    """Check prerequisites.
5089 5c947f38 Iustin Pop

5090 5c947f38 Iustin Pop
    This checks that we have the given tag.
5091 5c947f38 Iustin Pop

5092 5c947f38 Iustin Pop
    """
5093 5c947f38 Iustin Pop
    TagsLU.CheckPrereq(self)
5094 f27302fa Iustin Pop
    for tag in self.op.tags:
5095 f27302fa Iustin Pop
      objects.TaggableObject.ValidateTag(tag)
5096 f27302fa Iustin Pop
    del_tags = frozenset(self.op.tags)
5097 f27302fa Iustin Pop
    cur_tags = self.target.GetTags()
5098 f27302fa Iustin Pop
    if not del_tags <= cur_tags:
5099 f27302fa Iustin Pop
      diff_tags = del_tags - cur_tags
5100 f27302fa Iustin Pop
      diff_names = ["'%s'" % tag for tag in diff_tags]
5101 f27302fa Iustin Pop
      diff_names.sort()
5102 f27302fa Iustin Pop
      raise errors.OpPrereqError("Tag(s) %s not found" %
5103 f27302fa Iustin Pop
                                 (",".join(diff_names)))
5104 5c947f38 Iustin Pop
5105 5c947f38 Iustin Pop
  def Exec(self, feedback_fn):
5106 5c947f38 Iustin Pop
    """Remove the tag from the object.
5107 5c947f38 Iustin Pop

5108 5c947f38 Iustin Pop
    """
5109 f27302fa Iustin Pop
    for tag in self.op.tags:
5110 f27302fa Iustin Pop
      self.target.RemoveTag(tag)
5111 5c947f38 Iustin Pop
    try:
5112 5c947f38 Iustin Pop
      self.cfg.Update(self.target)
5113 5c947f38 Iustin Pop
    except errors.ConfigurationError:
5114 3ecf6786 Iustin Pop
      raise errors.OpRetryError("There has been a modification to the"
5115 3ecf6786 Iustin Pop
                                " config file and the operation has been"
5116 3ecf6786 Iustin Pop
                                " aborted. Please retry.")
5117 06009e27 Iustin Pop
5118 0eed6e61 Guido Trotter
5119 06009e27 Iustin Pop
class LUTestDelay(NoHooksLU):
5120 06009e27 Iustin Pop
  """Sleep for a specified amount of time.
5121 06009e27 Iustin Pop

5122 0b097284 Guido Trotter
  This LU sleeps on the master and/or nodes for a specified amount of
5123 06009e27 Iustin Pop
  time.
5124 06009e27 Iustin Pop

5125 06009e27 Iustin Pop
  """
5126 06009e27 Iustin Pop
  _OP_REQP = ["duration", "on_master", "on_nodes"]
5127 fbe9022f Guido Trotter
  REQ_BGL = False
5128 06009e27 Iustin Pop
5129 fbe9022f Guido Trotter
  def ExpandNames(self):
5130 fbe9022f Guido Trotter
    """Expand names and set required locks.
5131 06009e27 Iustin Pop

5132 fbe9022f Guido Trotter
    This expands the node list, if any.
5133 06009e27 Iustin Pop

5134 06009e27 Iustin Pop
    """
5135 fbe9022f Guido Trotter
    self.needed_locks = {}
5136 06009e27 Iustin Pop
    if self.op.on_nodes:
5137 fbe9022f Guido Trotter
      # _GetWantedNodes can be used here, but is not always appropriate to use
5138 fbe9022f Guido Trotter
      # this way in ExpandNames. Check LogicalUnit.ExpandNames docstring for
5139 fbe9022f Guido Trotter
      # more information.
5140 06009e27 Iustin Pop
      self.op.on_nodes = _GetWantedNodes(self, self.op.on_nodes)
5141 fbe9022f Guido Trotter
      self.needed_locks[locking.LEVEL_NODE] = self.op.on_nodes
5142 fbe9022f Guido Trotter
5143 fbe9022f Guido Trotter
  def CheckPrereq(self):
5144 fbe9022f Guido Trotter
    """Check prerequisites.
5145 fbe9022f Guido Trotter

5146 fbe9022f Guido Trotter
    """
5147 06009e27 Iustin Pop
5148 06009e27 Iustin Pop
  def Exec(self, feedback_fn):
5149 06009e27 Iustin Pop
    """Do the actual sleep.
5150 06009e27 Iustin Pop

5151 06009e27 Iustin Pop
    """
5152 06009e27 Iustin Pop
    if self.op.on_master:
5153 06009e27 Iustin Pop
      if not utils.TestDelay(self.op.duration):
5154 06009e27 Iustin Pop
        raise errors.OpExecError("Error during master delay test")
5155 06009e27 Iustin Pop
    if self.op.on_nodes:
5156 06009e27 Iustin Pop
      result = rpc.call_test_delay(self.op.on_nodes, self.op.duration)
5157 06009e27 Iustin Pop
      if not result:
5158 06009e27 Iustin Pop
        raise errors.OpExecError("Complete failure from rpc call")
5159 06009e27 Iustin Pop
      for node, node_result in result.items():
5160 06009e27 Iustin Pop
        if not node_result:
5161 06009e27 Iustin Pop
          raise errors.OpExecError("Failure during rpc call to node %s,"
5162 06009e27 Iustin Pop
                                   " result: %s" % (node, node_result))
5163 d61df03e Iustin Pop
5164 d61df03e Iustin Pop
5165 d1c2dd75 Iustin Pop
class IAllocator(object):
5166 d1c2dd75 Iustin Pop
  """IAllocator framework.
5167 d61df03e Iustin Pop

5168 d1c2dd75 Iustin Pop
  An IAllocator instance has four sets of attributes:
5169 d6a02168 Michael Hanselmann
    - cfg that is needed to query the cluster
5170 d1c2dd75 Iustin Pop
    - input data (all members of the _KEYS class attribute are required)
5171 d1c2dd75 Iustin Pop
    - four buffer attributes (in|out_data|text), that represent the
5172 d1c2dd75 Iustin Pop
      input (to the external script) in text and data structure format,
5173 d1c2dd75 Iustin Pop
      and the output from it, again in two formats
5174 d1c2dd75 Iustin Pop
    - the result variables from the script (success, info, nodes) for
5175 d1c2dd75 Iustin Pop
      easy usage
5176 d61df03e Iustin Pop

5177 d61df03e Iustin Pop
  """
5178 29859cb7 Iustin Pop
  _ALLO_KEYS = [
5179 d1c2dd75 Iustin Pop
    "mem_size", "disks", "disk_template",
5180 d1c2dd75 Iustin Pop
    "os", "tags", "nics", "vcpus",
5181 d1c2dd75 Iustin Pop
    ]
5182 29859cb7 Iustin Pop
  _RELO_KEYS = [
5183 29859cb7 Iustin Pop
    "relocate_from",
5184 29859cb7 Iustin Pop
    ]
5185 d1c2dd75 Iustin Pop
5186 d6a02168 Michael Hanselmann
  def __init__(self, cfg, mode, name, **kwargs):
5187 d1c2dd75 Iustin Pop
    self.cfg = cfg
5188 d1c2dd75 Iustin Pop
    # init buffer variables
5189 d1c2dd75 Iustin Pop
    self.in_text = self.out_text = self.in_data = self.out_data = None
5190 d1c2dd75 Iustin Pop
    # init all input fields so that pylint is happy
5191 29859cb7 Iustin Pop
    self.mode = mode
5192 29859cb7 Iustin Pop
    self.name = name
5193 d1c2dd75 Iustin Pop
    self.mem_size = self.disks = self.disk_template = None
5194 d1c2dd75 Iustin Pop
    self.os = self.tags = self.nics = self.vcpus = None
5195 29859cb7 Iustin Pop
    self.relocate_from = None
5196 27579978 Iustin Pop
    # computed fields
5197 27579978 Iustin Pop
    self.required_nodes = None
5198 d1c2dd75 Iustin Pop
    # init result fields
5199 d1c2dd75 Iustin Pop
    self.success = self.info = self.nodes = None
5200 29859cb7 Iustin Pop
    if self.mode == constants.IALLOCATOR_MODE_ALLOC:
5201 29859cb7 Iustin Pop
      keyset = self._ALLO_KEYS
5202 29859cb7 Iustin Pop
    elif self.mode == constants.IALLOCATOR_MODE_RELOC:
5203 29859cb7 Iustin Pop
      keyset = self._RELO_KEYS
5204 29859cb7 Iustin Pop
    else:
5205 29859cb7 Iustin Pop
      raise errors.ProgrammerError("Unknown mode '%s' passed to the"
5206 29859cb7 Iustin Pop
                                   " IAllocator" % self.mode)
5207 d1c2dd75 Iustin Pop
    for key in kwargs:
5208 29859cb7 Iustin Pop
      if key not in keyset:
5209 d1c2dd75 Iustin Pop
        raise errors.ProgrammerError("Invalid input parameter '%s' to"
5210 d1c2dd75 Iustin Pop
                                     " IAllocator" % key)
5211 d1c2dd75 Iustin Pop
      setattr(self, key, kwargs[key])
5212 29859cb7 Iustin Pop
    for key in keyset:
5213 d1c2dd75 Iustin Pop
      if key not in kwargs:
5214 d1c2dd75 Iustin Pop
        raise errors.ProgrammerError("Missing input parameter '%s' to"
5215 d1c2dd75 Iustin Pop
                                     " IAllocator" % key)
5216 d1c2dd75 Iustin Pop
    self._BuildInputData()
5217 d1c2dd75 Iustin Pop
5218 d1c2dd75 Iustin Pop
  def _ComputeClusterData(self):
5219 d1c2dd75 Iustin Pop
    """Compute the generic allocator input data.
5220 d1c2dd75 Iustin Pop

5221 d1c2dd75 Iustin Pop
    This is the data that is independent of the actual operation.
5222 d1c2dd75 Iustin Pop

5223 d1c2dd75 Iustin Pop
    """
5224 d1c2dd75 Iustin Pop
    cfg = self.cfg
5225 e69d05fd Iustin Pop
    cluster_info = cfg.GetClusterInfo()
5226 d1c2dd75 Iustin Pop
    # cluster data
5227 d1c2dd75 Iustin Pop
    data = {
5228 d1c2dd75 Iustin Pop
      "version": 1,
5229 d6a02168 Michael Hanselmann
      "cluster_name": self.cfg.GetClusterName(),
5230 e69d05fd Iustin Pop
      "cluster_tags": list(cluster_info.GetTags()),
5231 e69d05fd Iustin Pop
      "enable_hypervisors": list(cluster_info.enabled_hypervisors),
5232 d1c2dd75 Iustin Pop
      # we don't have job IDs
5233 d61df03e Iustin Pop
      }
5234 d61df03e Iustin Pop
5235 6286519f Iustin Pop
    i_list = [cfg.GetInstanceInfo(iname) for iname in cfg.GetInstanceList()]
5236 6286519f Iustin Pop
5237 d1c2dd75 Iustin Pop
    # node data
5238 d1c2dd75 Iustin Pop
    node_results = {}
5239 d1c2dd75 Iustin Pop
    node_list = cfg.GetNodeList()
5240 e69d05fd Iustin Pop
    # FIXME: here we have only one hypervisor's information, but
5241 e69d05fd Iustin Pop
    # instances can belong to different hypervisors
5242 e69d05fd Iustin Pop
    node_data = rpc.call_node_info(node_list, cfg.GetVGName(),
5243 e69d05fd Iustin Pop
                                   cfg.GetHypervisorType())
5244 d1c2dd75 Iustin Pop
    for nname in node_list:
5245 d1c2dd75 Iustin Pop
      ninfo = cfg.GetNodeInfo(nname)
5246 d1c2dd75 Iustin Pop
      if nname not in node_data or not isinstance(node_data[nname], dict):
5247 d1c2dd75 Iustin Pop
        raise errors.OpExecError("Can't get data for node %s" % nname)
5248 d1c2dd75 Iustin Pop
      remote_info = node_data[nname]
5249 b2662e7f Iustin Pop
      for attr in ['memory_total', 'memory_free', 'memory_dom0',
5250 4337cf1b Iustin Pop
                   'vg_size', 'vg_free', 'cpu_total']:
5251 d1c2dd75 Iustin Pop
        if attr not in remote_info:
5252 d1c2dd75 Iustin Pop
          raise errors.OpExecError("Node '%s' didn't return attribute '%s'" %
5253 d1c2dd75 Iustin Pop
                                   (nname, attr))
5254 d1c2dd75 Iustin Pop
        try:
5255 b2662e7f Iustin Pop
          remote_info[attr] = int(remote_info[attr])
5256 d1c2dd75 Iustin Pop
        except ValueError, err:
5257 d1c2dd75 Iustin Pop
          raise errors.OpExecError("Node '%s' returned invalid value for '%s':"
5258 d1c2dd75 Iustin Pop
                                   " %s" % (nname, attr, str(err)))
5259 6286519f Iustin Pop
      # compute memory used by primary instances
5260 6286519f Iustin Pop
      i_p_mem = i_p_up_mem = 0
5261 6286519f Iustin Pop
      for iinfo in i_list:
5262 6286519f Iustin Pop
        if iinfo.primary_node == nname:
5263 6286519f Iustin Pop
          i_p_mem += iinfo.memory
5264 6286519f Iustin Pop
          if iinfo.status == "up":
5265 6286519f Iustin Pop
            i_p_up_mem += iinfo.memory
5266 6286519f Iustin Pop
5267 b2662e7f Iustin Pop
      # compute memory used by instances
5268 d1c2dd75 Iustin Pop
      pnr = {
5269 d1c2dd75 Iustin Pop
        "tags": list(ninfo.GetTags()),
5270 b2662e7f Iustin Pop
        "total_memory": remote_info['memory_total'],
5271 b2662e7f Iustin Pop
        "reserved_memory": remote_info['memory_dom0'],
5272 b2662e7f Iustin Pop
        "free_memory": remote_info['memory_free'],
5273 6286519f Iustin Pop
        "i_pri_memory": i_p_mem,
5274 6286519f Iustin Pop
        "i_pri_up_memory": i_p_up_mem,
5275 b2662e7f Iustin Pop
        "total_disk": remote_info['vg_size'],
5276 b2662e7f Iustin Pop
        "free_disk": remote_info['vg_free'],
5277 d1c2dd75 Iustin Pop
        "primary_ip": ninfo.primary_ip,
5278 d1c2dd75 Iustin Pop
        "secondary_ip": ninfo.secondary_ip,
5279 4337cf1b Iustin Pop
        "total_cpus": remote_info['cpu_total'],
5280 d1c2dd75 Iustin Pop
        }
5281 d1c2dd75 Iustin Pop
      node_results[nname] = pnr
5282 d1c2dd75 Iustin Pop
    data["nodes"] = node_results
5283 d1c2dd75 Iustin Pop
5284 d1c2dd75 Iustin Pop
    # instance data
5285 d1c2dd75 Iustin Pop
    instance_data = {}
5286 6286519f Iustin Pop
    for iinfo in i_list:
5287 d1c2dd75 Iustin Pop
      nic_data = [{"mac": n.mac, "ip": n.ip, "bridge": n.bridge}
5288 d1c2dd75 Iustin Pop
                  for n in iinfo.nics]
5289 d1c2dd75 Iustin Pop
      pir = {
5290 d1c2dd75 Iustin Pop
        "tags": list(iinfo.GetTags()),
5291 d1c2dd75 Iustin Pop
        "should_run": iinfo.status == "up",
5292 d1c2dd75 Iustin Pop
        "vcpus": iinfo.vcpus,
5293 d1c2dd75 Iustin Pop
        "memory": iinfo.memory,
5294 d1c2dd75 Iustin Pop
        "os": iinfo.os,
5295 d1c2dd75 Iustin Pop
        "nodes": [iinfo.primary_node] + list(iinfo.secondary_nodes),
5296 d1c2dd75 Iustin Pop
        "nics": nic_data,
5297 d1c2dd75 Iustin Pop
        "disks": [{"size": dsk.size, "mode": "w"} for dsk in iinfo.disks],
5298 d1c2dd75 Iustin Pop
        "disk_template": iinfo.disk_template,
5299 e69d05fd Iustin Pop
        "hypervisor": iinfo.hypervisor,
5300 d1c2dd75 Iustin Pop
        }
5301 768f0a80 Iustin Pop
      instance_data[iinfo.name] = pir
5302 d61df03e Iustin Pop
5303 d1c2dd75 Iustin Pop
    data["instances"] = instance_data
5304 d61df03e Iustin Pop
5305 d1c2dd75 Iustin Pop
    self.in_data = data
5306 d61df03e Iustin Pop
5307 d1c2dd75 Iustin Pop
  def _AddNewInstance(self):
5308 d1c2dd75 Iustin Pop
    """Add new instance data to allocator structure.
5309 d61df03e Iustin Pop

5310 d1c2dd75 Iustin Pop
    This in combination with _ComputeClusterData will create the
5311 d1c2dd75 Iustin Pop
    correct structure needed as input for the allocator.
5312 d61df03e Iustin Pop

5313 d1c2dd75 Iustin Pop
    The checks for the completeness of the opcode must have already been
5314 d1c2dd75 Iustin Pop
    done.
5315 d61df03e Iustin Pop

5316 d1c2dd75 Iustin Pop
    """
5317 d1c2dd75 Iustin Pop
    data = self.in_data
5318 d1c2dd75 Iustin Pop
    if len(self.disks) != 2:
5319 d1c2dd75 Iustin Pop
      raise errors.OpExecError("Only two-disk configurations supported")
5320 d1c2dd75 Iustin Pop
5321 d1c2dd75 Iustin Pop
    disk_space = _ComputeDiskSize(self.disk_template,
5322 d1c2dd75 Iustin Pop
                                  self.disks[0]["size"], self.disks[1]["size"])
5323 d1c2dd75 Iustin Pop
5324 27579978 Iustin Pop
    if self.disk_template in constants.DTS_NET_MIRROR:
5325 27579978 Iustin Pop
      self.required_nodes = 2
5326 27579978 Iustin Pop
    else:
5327 27579978 Iustin Pop
      self.required_nodes = 1
5328 d1c2dd75 Iustin Pop
    request = {
5329 d1c2dd75 Iustin Pop
      "type": "allocate",
5330 d1c2dd75 Iustin Pop
      "name": self.name,
5331 d1c2dd75 Iustin Pop
      "disk_template": self.disk_template,
5332 d1c2dd75 Iustin Pop
      "tags": self.tags,
5333 d1c2dd75 Iustin Pop
      "os": self.os,
5334 d1c2dd75 Iustin Pop
      "vcpus": self.vcpus,
5335 d1c2dd75 Iustin Pop
      "memory": self.mem_size,
5336 d1c2dd75 Iustin Pop
      "disks": self.disks,
5337 d1c2dd75 Iustin Pop
      "disk_space_total": disk_space,
5338 d1c2dd75 Iustin Pop
      "nics": self.nics,
5339 27579978 Iustin Pop
      "required_nodes": self.required_nodes,
5340 d1c2dd75 Iustin Pop
      }
5341 d1c2dd75 Iustin Pop
    data["request"] = request
5342 298fe380 Iustin Pop
5343 d1c2dd75 Iustin Pop
  def _AddRelocateInstance(self):
5344 d1c2dd75 Iustin Pop
    """Add relocate instance data to allocator structure.
5345 298fe380 Iustin Pop

5346 d1c2dd75 Iustin Pop
    This in combination with _ComputeClusterData will create the
5347 d1c2dd75 Iustin Pop
    correct structure needed as input for the allocator.
5348 d61df03e Iustin Pop

5349 d1c2dd75 Iustin Pop
    The checks for the completeness of the opcode must have already been
5350 d1c2dd75 Iustin Pop
    done.
5351 d61df03e Iustin Pop

5352 d1c2dd75 Iustin Pop
    """
5353 27579978 Iustin Pop
    instance = self.cfg.GetInstanceInfo(self.name)
5354 27579978 Iustin Pop
    if instance is None:
5355 27579978 Iustin Pop
      raise errors.ProgrammerError("Unknown instance '%s' passed to"
5356 27579978 Iustin Pop
                                   " IAllocator" % self.name)
5357 27579978 Iustin Pop
5358 27579978 Iustin Pop
    if instance.disk_template not in constants.DTS_NET_MIRROR:
5359 27579978 Iustin Pop
      raise errors.OpPrereqError("Can't relocate non-mirrored instances")
5360 27579978 Iustin Pop
5361 2a139bb0 Iustin Pop
    if len(instance.secondary_nodes) != 1:
5362 2a139bb0 Iustin Pop
      raise errors.OpPrereqError("Instance has not exactly one secondary node")
5363 2a139bb0 Iustin Pop
5364 27579978 Iustin Pop
    self.required_nodes = 1
5365 27579978 Iustin Pop
5366 27579978 Iustin Pop
    disk_space = _ComputeDiskSize(instance.disk_template,
5367 27579978 Iustin Pop
                                  instance.disks[0].size,
5368 27579978 Iustin Pop
                                  instance.disks[1].size)
5369 27579978 Iustin Pop
5370 d1c2dd75 Iustin Pop
    request = {
5371 2a139bb0 Iustin Pop
      "type": "relocate",
5372 d1c2dd75 Iustin Pop
      "name": self.name,
5373 27579978 Iustin Pop
      "disk_space_total": disk_space,
5374 27579978 Iustin Pop
      "required_nodes": self.required_nodes,
5375 29859cb7 Iustin Pop
      "relocate_from": self.relocate_from,
5376 d1c2dd75 Iustin Pop
      }
5377 27579978 Iustin Pop
    self.in_data["request"] = request
5378 d61df03e Iustin Pop
5379 d1c2dd75 Iustin Pop
  def _BuildInputData(self):
5380 d1c2dd75 Iustin Pop
    """Build input data structures.
5381 d61df03e Iustin Pop

5382 d1c2dd75 Iustin Pop
    """
5383 d1c2dd75 Iustin Pop
    self._ComputeClusterData()
5384 d61df03e Iustin Pop
5385 d1c2dd75 Iustin Pop
    if self.mode == constants.IALLOCATOR_MODE_ALLOC:
5386 d1c2dd75 Iustin Pop
      self._AddNewInstance()
5387 d1c2dd75 Iustin Pop
    else:
5388 d1c2dd75 Iustin Pop
      self._AddRelocateInstance()
5389 d61df03e Iustin Pop
5390 d1c2dd75 Iustin Pop
    self.in_text = serializer.Dump(self.in_data)
5391 d61df03e Iustin Pop
5392 8d528b7c Iustin Pop
  def Run(self, name, validate=True, call_fn=rpc.call_iallocator_runner):
5393 d1c2dd75 Iustin Pop
    """Run an instance allocator and return the results.
5394 298fe380 Iustin Pop

5395 d1c2dd75 Iustin Pop
    """
5396 d1c2dd75 Iustin Pop
    data = self.in_text
5397 298fe380 Iustin Pop
5398 d6a02168 Michael Hanselmann
    result = call_fn(self.cfg.GetMasterNode(), name, self.in_text)
5399 298fe380 Iustin Pop
5400 43f5ea7a Guido Trotter
    if not isinstance(result, (list, tuple)) or len(result) != 4:
5401 8d528b7c Iustin Pop
      raise errors.OpExecError("Invalid result from master iallocator runner")
5402 8d528b7c Iustin Pop
5403 8d528b7c Iustin Pop
    rcode, stdout, stderr, fail = result
5404 8d528b7c Iustin Pop
5405 8d528b7c Iustin Pop
    if rcode == constants.IARUN_NOTFOUND:
5406 8d528b7c Iustin Pop
      raise errors.OpExecError("Can't find allocator '%s'" % name)
5407 8d528b7c Iustin Pop
    elif rcode == constants.IARUN_FAILURE:
5408 38206f3c Iustin Pop
      raise errors.OpExecError("Instance allocator call failed: %s,"
5409 38206f3c Iustin Pop
                               " output: %s" % (fail, stdout+stderr))
5410 8d528b7c Iustin Pop
    self.out_text = stdout
5411 d1c2dd75 Iustin Pop
    if validate:
5412 d1c2dd75 Iustin Pop
      self._ValidateResult()
5413 298fe380 Iustin Pop
5414 d1c2dd75 Iustin Pop
  def _ValidateResult(self):
5415 d1c2dd75 Iustin Pop
    """Process the allocator results.
5416 538475ca Iustin Pop

5417 d1c2dd75 Iustin Pop
    This will process and if successful save the result in
5418 d1c2dd75 Iustin Pop
    self.out_data and the other parameters.
5419 538475ca Iustin Pop

5420 d1c2dd75 Iustin Pop
    """
5421 d1c2dd75 Iustin Pop
    try:
5422 d1c2dd75 Iustin Pop
      rdict = serializer.Load(self.out_text)
5423 d1c2dd75 Iustin Pop
    except Exception, err:
5424 d1c2dd75 Iustin Pop
      raise errors.OpExecError("Can't parse iallocator results: %s" % str(err))
5425 d1c2dd75 Iustin Pop
5426 d1c2dd75 Iustin Pop
    if not isinstance(rdict, dict):
5427 d1c2dd75 Iustin Pop
      raise errors.OpExecError("Can't parse iallocator results: not a dict")
5428 538475ca Iustin Pop
5429 d1c2dd75 Iustin Pop
    for key in "success", "info", "nodes":
5430 d1c2dd75 Iustin Pop
      if key not in rdict:
5431 d1c2dd75 Iustin Pop
        raise errors.OpExecError("Can't parse iallocator results:"
5432 d1c2dd75 Iustin Pop
                                 " missing key '%s'" % key)
5433 d1c2dd75 Iustin Pop
      setattr(self, key, rdict[key])
5434 538475ca Iustin Pop
5435 d1c2dd75 Iustin Pop
    if not isinstance(rdict["nodes"], list):
5436 d1c2dd75 Iustin Pop
      raise errors.OpExecError("Can't parse iallocator results: 'nodes' key"
5437 d1c2dd75 Iustin Pop
                               " is not a list")
5438 d1c2dd75 Iustin Pop
    self.out_data = rdict
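    # A well-formed allocator reply therefore parses into something like
    # (illustrative): {"success": True, "info": "allocation successful",
    #                  "nodes": ["node2.example.com", "node3.example.com"]}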
5439 538475ca Iustin Pop
5440 538475ca Iustin Pop
5441 d61df03e Iustin Pop
class LUTestAllocator(NoHooksLU):
5442 d61df03e Iustin Pop
  """Run allocator tests.
5443 d61df03e Iustin Pop

5444 d61df03e Iustin Pop
  This LU runs the allocator tests
5445 d61df03e Iustin Pop

5446 d61df03e Iustin Pop
  """
5447 d61df03e Iustin Pop
  _OP_REQP = ["direction", "mode", "name"]
5448 d61df03e Iustin Pop
5449 d61df03e Iustin Pop
  def CheckPrereq(self):
5450 d61df03e Iustin Pop
    """Check prerequisites.
5451 d61df03e Iustin Pop

5452 d61df03e Iustin Pop
    This checks the opcode parameters depending on the direction and mode.
5453 d61df03e Iustin Pop

5454 d61df03e Iustin Pop
    """
5455 298fe380 Iustin Pop
    if self.op.mode == constants.IALLOCATOR_MODE_ALLOC:
5456 d61df03e Iustin Pop
      for attr in ["name", "mem_size", "disks", "disk_template",
5457 d61df03e Iustin Pop
                   "os", "tags", "nics", "vcpus"]:
5458 d61df03e Iustin Pop
        if not hasattr(self.op, attr):
5459 d61df03e Iustin Pop
          raise errors.OpPrereqError("Missing attribute '%s' on opcode input" %
5460 d61df03e Iustin Pop
                                     attr)
5461 d61df03e Iustin Pop
      iname = self.cfg.ExpandInstanceName(self.op.name)
5462 d61df03e Iustin Pop
      if iname is not None:
5463 d61df03e Iustin Pop
        raise errors.OpPrereqError("Instance '%s' already in the cluster" %
5464 d61df03e Iustin Pop
                                   iname)
5465 d61df03e Iustin Pop
      if not isinstance(self.op.nics, list):
5466 d61df03e Iustin Pop
        raise errors.OpPrereqError("Invalid parameter 'nics'")
5467 d61df03e Iustin Pop
      for row in self.op.nics:
5468 d61df03e Iustin Pop
        if (not isinstance(row, dict) or
5469 d61df03e Iustin Pop
            "mac" not in row or
5470 d61df03e Iustin Pop
            "ip" not in row or
5471 d61df03e Iustin Pop
            "bridge" not in row):
5472 d61df03e Iustin Pop
          raise errors.OpPrereqError("Invalid contents of the"
5473 d61df03e Iustin Pop
                                     " 'nics' parameter")
5474 d61df03e Iustin Pop
      if not isinstance(self.op.disks, list):
5475 d61df03e Iustin Pop
        raise errors.OpPrereqError("Invalid parameter 'disks'")
5476 298fe380 Iustin Pop
      if len(self.op.disks) != 2:
5477 298fe380 Iustin Pop
        raise errors.OpPrereqError("Only two-disk configurations supported")
5478 d61df03e Iustin Pop
      for row in self.op.disks:
5479 d61df03e Iustin Pop
        if (not isinstance(row, dict) or
5480 d61df03e Iustin Pop
            "size" not in row or
5481 d61df03e Iustin Pop
            not isinstance(row["size"], int) or
5482 d61df03e Iustin Pop
            "mode" not in row or
5483 d61df03e Iustin Pop
            row["mode"] not in ['r', 'w']):
5484 d61df03e Iustin Pop
          raise errors.OpPrereqError("Invalid contents of the"
5485 d61df03e Iustin Pop
                                     " 'disks' parameter")
5486 298fe380 Iustin Pop
    elif self.op.mode == constants.IALLOCATOR_MODE_RELOC:
5487 d61df03e Iustin Pop
      if not hasattr(self.op, "name"):
5488 d61df03e Iustin Pop
        raise errors.OpPrereqError("Missing attribute 'name' on opcode input")
5489 d61df03e Iustin Pop
      fname = self.cfg.ExpandInstanceName(self.op.name)
5490 d61df03e Iustin Pop
      if fname is None:
5491 d61df03e Iustin Pop
        raise errors.OpPrereqError("Instance '%s' not found for relocation" %
5492 d61df03e Iustin Pop
                                   self.op.name)
5493 d61df03e Iustin Pop
      self.op.name = fname
5494 29859cb7 Iustin Pop
      self.relocate_from = self.cfg.GetInstanceInfo(fname).secondary_nodes
5495 d61df03e Iustin Pop
    else:
5496 d61df03e Iustin Pop
      raise errors.OpPrereqError("Invalid test allocator mode '%s'" %
5497 d61df03e Iustin Pop
                                 self.op.mode)
5498 d61df03e Iustin Pop
5499 298fe380 Iustin Pop
    if self.op.direction == constants.IALLOCATOR_DIR_OUT:
5500 298fe380 Iustin Pop
      if not hasattr(self.op, "allocator") or self.op.allocator is None:
5501 d61df03e Iustin Pop
        raise errors.OpPrereqError("Missing allocator name")
5502 298fe380 Iustin Pop
    elif self.op.direction != constants.IALLOCATOR_DIR_IN:
5503 d61df03e Iustin Pop
      raise errors.OpPrereqError("Wrong allocator test '%s'" %
5504 d61df03e Iustin Pop
                                 self.op.direction)
5505 d61df03e Iustin Pop
5506 d61df03e Iustin Pop
  def Exec(self, feedback_fn):
5507 d61df03e Iustin Pop
    """Run the allocator test.
5508 d61df03e Iustin Pop

5509 d61df03e Iustin Pop
    """
5510 29859cb7 Iustin Pop
    if self.op.mode == constants.IALLOCATOR_MODE_ALLOC:
5511 d6a02168 Michael Hanselmann
      ial = IAllocator(self.cfg,
5512 29859cb7 Iustin Pop
                       mode=self.op.mode,
5513 29859cb7 Iustin Pop
                       name=self.op.name,
5514 29859cb7 Iustin Pop
                       mem_size=self.op.mem_size,
5515 29859cb7 Iustin Pop
                       disks=self.op.disks,
5516 29859cb7 Iustin Pop
                       disk_template=self.op.disk_template,
5517 29859cb7 Iustin Pop
                       os=self.op.os,
5518 29859cb7 Iustin Pop
                       tags=self.op.tags,
5519 29859cb7 Iustin Pop
                       nics=self.op.nics,
5520 29859cb7 Iustin Pop
                       vcpus=self.op.vcpus,
5521 29859cb7 Iustin Pop
                       )
5522 29859cb7 Iustin Pop
    else:
5523 d6a02168 Michael Hanselmann
      ial = IAllocator(self.cfg,
5524 29859cb7 Iustin Pop
                       mode=self.op.mode,
5525 29859cb7 Iustin Pop
                       name=self.op.name,
5526 29859cb7 Iustin Pop
                       relocate_from=list(self.relocate_from),
5527 29859cb7 Iustin Pop
                       )
5528 d61df03e Iustin Pop
5529 298fe380 Iustin Pop
    if self.op.direction == constants.IALLOCATOR_DIR_IN:
5530 d1c2dd75 Iustin Pop
      result = ial.in_text
5531 298fe380 Iustin Pop
    else:
5532 d1c2dd75 Iustin Pop
      ial.Run(self.op.allocator, validate=False)
5533 d1c2dd75 Iustin Pop
      result = ial.out_text
5534 298fe380 Iustin Pop
    return result