Statistics
| Branch: | Tag: | Revision:

root / lib / cmdlib.py @ 3ccafd0e

History | View | Annotate | Download (187.8 kB)

1 2f31098c Iustin Pop
#
2 a8083063 Iustin Pop
#
3 a8083063 Iustin Pop
4 e7c6e02b Michael Hanselmann
# Copyright (C) 2006, 2007, 2008 Google Inc.
5 a8083063 Iustin Pop
#
6 a8083063 Iustin Pop
# This program is free software; you can redistribute it and/or modify
7 a8083063 Iustin Pop
# it under the terms of the GNU General Public License as published by
8 a8083063 Iustin Pop
# the Free Software Foundation; either version 2 of the License, or
9 a8083063 Iustin Pop
# (at your option) any later version.
10 a8083063 Iustin Pop
#
11 a8083063 Iustin Pop
# This program is distributed in the hope that it will be useful, but
12 a8083063 Iustin Pop
# WITHOUT ANY WARRANTY; without even the implied warranty of
13 a8083063 Iustin Pop
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
14 a8083063 Iustin Pop
# General Public License for more details.
15 a8083063 Iustin Pop
#
16 a8083063 Iustin Pop
# You should have received a copy of the GNU General Public License
17 a8083063 Iustin Pop
# along with this program; if not, write to the Free Software
18 a8083063 Iustin Pop
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
19 a8083063 Iustin Pop
# 02110-1301, USA.
20 a8083063 Iustin Pop
21 a8083063 Iustin Pop
22 880478f8 Iustin Pop
"""Module implementing the master-side code."""
23 a8083063 Iustin Pop
24 a8083063 Iustin Pop
# pylint: disable-msg=W0613,W0201
25 a8083063 Iustin Pop
26 a8083063 Iustin Pop
import os
27 a8083063 Iustin Pop
import os.path
28 a8083063 Iustin Pop
import sha
29 a8083063 Iustin Pop
import time
30 a8083063 Iustin Pop
import tempfile
31 a8083063 Iustin Pop
import re
32 a8083063 Iustin Pop
import platform
33 ffa1c0dc Iustin Pop
import logging
34 74409b12 Iustin Pop
import copy
35 a8083063 Iustin Pop
36 a8083063 Iustin Pop
from ganeti import ssh
37 a8083063 Iustin Pop
from ganeti import logger
38 a8083063 Iustin Pop
from ganeti import utils
39 a8083063 Iustin Pop
from ganeti import errors
40 a8083063 Iustin Pop
from ganeti import hypervisor
41 6048c986 Guido Trotter
from ganeti import locking
42 a8083063 Iustin Pop
from ganeti import constants
43 a8083063 Iustin Pop
from ganeti import objects
44 a8083063 Iustin Pop
from ganeti import opcodes
45 8d14b30d Iustin Pop
from ganeti import serializer
46 d61df03e Iustin Pop
47 d61df03e Iustin Pop
48 a8083063 Iustin Pop
class LogicalUnit(object):
  """Logical Unit base class.

  Subclasses must follow these rules:
    - implement ExpandNames
    - implement CheckPrereq
    - implement Exec
    - implement BuildHooksEnv
    - redefine HPATH and HTYPE
    - optionally redefine their run requirements:
        REQ_MASTER: the LU needs to run on the master node
        REQ_BGL: the LU needs to hold the Big Ganeti Lock exclusively

  Note that all commands require root permissions.

  """
  HPATH = None
  HTYPE = None
  _OP_REQP = []
  REQ_MASTER = True
  REQ_BGL = True

  def __init__(self, processor, op, context, rpc):
    """Constructor for LogicalUnit.

    This needs to be overridden in derived classes in order to check op
    validity.

    Raises errors.OpPrereqError if a parameter listed in _OP_REQP is
    missing from the opcode, if the cluster is not initialized, or if
    REQ_MASTER is set and we are not running on the master node.

    """
    self.proc = processor
    self.op = op
    self.cfg = context.cfg
    self.context = context
    self.rpc = rpc
    # Dicts used to declare locking needs to mcpu
    self.needed_locks = None
    self.acquired_locks = {}
    # share_locks: lock level -> 0/1; 0 (exclusive) for every level by default
    self.share_locks = dict(((i, 0) for i in locking.LEVELS))
    self.add_locks = {}
    self.remove_locks = {}
    # Used to force good behavior when calling helper functions
    self.recalculate_locks = {}
    # Lazily-built ssh.SshRunner; exposed via the 'ssh' property below.
    # Note: name-mangled to _LogicalUnit__ssh, so subclasses can't clash.
    self.__ssh = None

    # Fail early if any opcode parameter this LU declares as required
    # is missing (or explicitly None) on the opcode object.
    for attr_name in self._OP_REQP:
      attr_val = getattr(op, attr_name, None)
      if attr_val is None:
        raise errors.OpPrereqError("Required parameter '%s' missing" %
                                   attr_name)

    if not self.cfg.IsCluster():
      raise errors.OpPrereqError("Cluster not initialized yet,"
                                 " use 'gnt-cluster init' first.")
    if self.REQ_MASTER:
      master = self.cfg.GetMasterNode()
      if master != utils.HostInfo().name:
        raise errors.OpPrereqError("Commands must be run on the master"
                                   " node %s" % master)

  def __GetSSH(self):
    """Returns the SshRunner object, creating it on first use.

    """
    if not self.__ssh:
      self.__ssh = ssh.SshRunner(self.cfg.GetClusterName())
    return self.__ssh

  # Read-only property so LU code can simply use self.ssh
  ssh = property(fget=__GetSSH)

  def ExpandNames(self):
    """Expand names for this LU.

    This method is called before starting to execute the opcode, and it should
    update all the parameters of the opcode to their canonical form (e.g. a
    short node name must be fully expanded after this method has successfully
    completed). This way locking, hooks, logging, etc. can work correctly.

    LUs which implement this method must also populate the self.needed_locks
    member, as a dict with lock levels as keys, and a list of needed lock names
    as values. Rules:
      - Use an empty dict if you don't need any lock
      - If you don't need any lock at a particular level omit that level
      - Don't put anything for the BGL level
      - If you want all locks at a level use locking.ALL_SET as a value

    If you need to share locks (rather than acquire them exclusively) at one
    level you can modify self.share_locks, setting a true value (usually 1) for
    that level. By default locks are not shared.

    Examples:
    # Acquire all nodes and one instance
    self.needed_locks = {
      locking.LEVEL_NODE: locking.ALL_SET,
      locking.LEVEL_INSTANCE: ['instance1.example.tld'],
    }
    # Acquire just two nodes
    self.needed_locks = {
      locking.LEVEL_NODE: ['node1.example.tld', 'node2.example.tld'],
    }
    # Acquire no locks
    self.needed_locks = {} # No, you can't leave it to the default value None

    """
    # The implementation of this method is mandatory only if the new LU is
    # concurrent, so that old LUs don't need to be changed all at the same
    # time.
    if self.REQ_BGL:
      self.needed_locks = {} # Exclusive LUs don't need locks.
    else:
      raise NotImplementedError

  def DeclareLocks(self, level):
    """Declare LU locking needs for a level

    While most LUs can just declare their locking needs at ExpandNames time,
    sometimes there's the need to calculate some locks after having acquired
    the ones before. This function is called just before acquiring locks at a
    particular level, but after acquiring the ones at lower levels, and permits
    such calculations. It can be used to modify self.needed_locks, and by
    default it does nothing.

    This function is only called if you have something already set in
    self.needed_locks for the level.

    @param level: Locking level which is going to be locked
    @type level: member of ganeti.locking.LEVELS

    """

  def CheckPrereq(self):
    """Check prerequisites for this LU.

    This method should check that the prerequisites for the execution
    of this LU are fulfilled. It can do internode communication, but
    it should be idempotent - no cluster or system changes are
    allowed.

    The method should raise errors.OpPrereqError in case something is
    not fulfilled. Its return value is ignored.

    This method should also update all the parameters of the opcode to
    their canonical form if it hasn't been done by ExpandNames before.

    """
    raise NotImplementedError

  def Exec(self, feedback_fn):
    """Execute the LU.

    This method should implement the actual work. It should raise
    errors.OpExecError for failures that are somewhat dealt with in
    code, or expected.

    """
    raise NotImplementedError

  def BuildHooksEnv(self):
    """Build hooks environment for this LU.

    This method should return a three-node tuple consisting of: a dict
    containing the environment that will be used for running the
    specific hook for this LU, a list of node names on which the hook
    should run before the execution, and a list of node names on which
    the hook should run after the execution.

    The keys of the dict must not have 'GANETI_' prefixed as this will
    be handled in the hooks runner. Also note additional keys will be
    added by the hooks runner. If the LU doesn't define any
    environment, an empty dict (and not None) should be returned.

    No nodes should be returned as an empty list (and not None).

    Note that if the HPATH for a LU class is None, this function will
    not be called.

    """
    raise NotImplementedError

  def HooksCallBack(self, phase, hook_results, feedback_fn, lu_result):
    """Notify the LU about the results of its hooks.

    This method is called every time a hooks phase is executed, and notifies
    the Logical Unit about the hooks' result. The LU can then use it to alter
    its result based on the hooks.  By default the method does nothing and the
    previous result is passed back unchanged but any LU can define it if it
    wants to use the local cluster hook-scripts somehow.

    Args:
      phase: the hooks phase that has just been run
      hook_results: the results of the multi-node hooks rpc call
      feedback_fn: function to send feedback back to the caller
      lu_result: the previous result this LU had, or None in the PRE phase.

    """
    return lu_result

  def _ExpandAndLockInstance(self):
    """Helper function to expand and lock an instance.

    Many LUs that work on an instance take its name in self.op.instance_name
    and need to expand it and then declare the expanded name for locking. This
    function does it, and then updates self.op.instance_name to the expanded
    name. It also initializes needed_locks as a dict, if this hasn't been done
    before.

    Raises errors.OpPrereqError if the instance name cannot be expanded.

    """
    if self.needed_locks is None:
      self.needed_locks = {}
    else:
      assert locking.LEVEL_INSTANCE not in self.needed_locks, \
        "_ExpandAndLockInstance called with instance-level locks set"
    expanded_name = self.cfg.ExpandInstanceName(self.op.instance_name)
    if expanded_name is None:
      raise errors.OpPrereqError("Instance '%s' not known" %
                                  self.op.instance_name)
    # NOTE(review): a single name (not a list) is stored here; presumably the
    # lock manager accepts that as well -- confirm against mcpu/locking.
    self.needed_locks[locking.LEVEL_INSTANCE] = expanded_name
    self.op.instance_name = expanded_name

  def _LockInstancesNodes(self, primary_only=False):
    """Helper function to declare instances' nodes for locking.

    This function should be called after locking one or more instances to lock
    their nodes. Its effect is populating self.needed_locks[locking.LEVEL_NODE]
    with all primary or secondary nodes for instances already locked and
    present in self.needed_locks[locking.LEVEL_INSTANCE].

    It should be called from DeclareLocks, and for safety only works if
    self.recalculate_locks[locking.LEVEL_NODE] is set.

    In the future it may grow parameters to just lock some instance's nodes, or
    to just lock primaries or secondary nodes, if needed.

    If should be called in DeclareLocks in a way similar to:

    if level == locking.LEVEL_NODE:
      self._LockInstancesNodes()

    @type primary_only: boolean
    @param primary_only: only lock primary nodes of locked instances

    """
    assert locking.LEVEL_NODE in self.recalculate_locks, \
      "_LockInstancesNodes helper function called with no nodes to recalculate"

    # TODO: check if we're really been called with the instance locks held

    # For now we'll replace self.needed_locks[locking.LEVEL_NODE], but in the
    # future we might want to have different behaviors depending on the value
    # of self.recalculate_locks[locking.LEVEL_NODE]
    wanted_nodes = []
    for instance_name in self.acquired_locks[locking.LEVEL_INSTANCE]:
      instance = self.context.cfg.GetInstanceInfo(instance_name)
      wanted_nodes.append(instance.primary_node)
      if not primary_only:
        wanted_nodes.extend(instance.secondary_nodes)

    # LOCKS_REPLACE overwrites the node lock list; LOCKS_APPEND extends it.
    if self.recalculate_locks[locking.LEVEL_NODE] == constants.LOCKS_REPLACE:
      self.needed_locks[locking.LEVEL_NODE] = wanted_nodes
    elif self.recalculate_locks[locking.LEVEL_NODE] == constants.LOCKS_APPEND:
      self.needed_locks[locking.LEVEL_NODE].extend(wanted_nodes)

    # Consume the marker so this helper can't be (mis)used twice per level
    del self.recalculate_locks[locking.LEVEL_NODE]
310 c4a2fee1 Guido Trotter
311 a8083063 Iustin Pop
312 a8083063 Iustin Pop
class NoHooksLU(LogicalUnit):
  """Base class for logical units that run no hooks.

  Deriving from this class instead of LogicalUnit saves every
  hook-less LU from having to disable hooks itself, keeping that
  boilerplate in one place.

  """
  HTYPE = None
  HPATH = None
322 a8083063 Iustin Pop
323 dcb93971 Michael Hanselmann
def _GetWantedNodes(lu, nodes):
  """Returns list of checked and expanded node names.

  Args:
    nodes: Non-empty list of node names (strings) to expand.  Unlike
      _GetWantedInstances, None/empty is NOT accepted here: passing a
      non-list raises OpPrereqError, and an empty list is a caller bug
      (ProgrammerError) -- see the checks below.

  Returns:
    The expanded names, sorted with utils.NiceSort.

  """
  if not isinstance(nodes, list):
    raise errors.OpPrereqError("Invalid argument type 'nodes'")

  if not nodes:
    raise errors.ProgrammerError("_GetWantedNodes should only be called with a"
      " non-empty list of nodes whose name is to be expanded.")

  wanted = []
  for name in nodes:
    node = lu.cfg.ExpandNodeName(name)
    if node is None:
      raise errors.OpPrereqError("No such node name '%s'" % name)
    wanted.append(node)

  return utils.NiceSort(wanted)
345 3312b702 Iustin Pop
346 3312b702 Iustin Pop
347 3312b702 Iustin Pop
def _GetWantedInstances(lu, instances):
  """Returns list of checked and expanded instance names.

  Args:
    instances: List of instances (strings) or None for all

  Returns:
    The expanded names, sorted with utils.NiceSort.

  """
  if not isinstance(instances, list):
    raise errors.OpPrereqError("Invalid argument type 'instances'")

  # An empty list means "all instances in the cluster".
  if not instances:
    return utils.NiceSort(lu.cfg.GetInstanceList())

  wanted = []
  for name in instances:
    full_name = lu.cfg.ExpandInstanceName(name)
    if full_name is None:
      raise errors.OpPrereqError("No such instance name '%s'" % name)
    wanted.append(full_name)
  return utils.NiceSort(wanted)
369 dcb93971 Michael Hanselmann
370 dcb93971 Michael Hanselmann
371 dcb93971 Michael Hanselmann
def _CheckOutputFields(static, dynamic, selected):
372 83120a01 Michael Hanselmann
  """Checks whether all selected fields are valid.
373 83120a01 Michael Hanselmann

374 83120a01 Michael Hanselmann
  Args:
375 83120a01 Michael Hanselmann
    static: Static fields
376 83120a01 Michael Hanselmann
    dynamic: Dynamic fields
377 83120a01 Michael Hanselmann

378 83120a01 Michael Hanselmann
  """
379 83120a01 Michael Hanselmann
  static_fields = frozenset(static)
380 83120a01 Michael Hanselmann
  dynamic_fields = frozenset(dynamic)
381 dcb93971 Michael Hanselmann
382 83120a01 Michael Hanselmann
  all_fields = static_fields | dynamic_fields
383 dcb93971 Michael Hanselmann
384 83120a01 Michael Hanselmann
  if not all_fields.issuperset(selected):
385 3ecf6786 Iustin Pop
    raise errors.OpPrereqError("Unknown output fields selected: %s"
386 3ecf6786 Iustin Pop
                               % ",".join(frozenset(selected).
387 3ecf6786 Iustin Pop
                                          difference(all_fields)))
388 dcb93971 Michael Hanselmann
389 dcb93971 Michael Hanselmann
390 ecb215b5 Michael Hanselmann
def _BuildInstanceHookEnv(name, primary_node, secondary_nodes, os_type, status,
391 396e1b78 Michael Hanselmann
                          memory, vcpus, nics):
392 ecb215b5 Michael Hanselmann
  """Builds instance related env variables for hooks from single variables.
393 ecb215b5 Michael Hanselmann

394 ecb215b5 Michael Hanselmann
  Args:
395 ecb215b5 Michael Hanselmann
    secondary_nodes: List of secondary nodes as strings
396 396e1b78 Michael Hanselmann
  """
397 396e1b78 Michael Hanselmann
  env = {
398 0e137c28 Iustin Pop
    "OP_TARGET": name,
399 396e1b78 Michael Hanselmann
    "INSTANCE_NAME": name,
400 396e1b78 Michael Hanselmann
    "INSTANCE_PRIMARY": primary_node,
401 396e1b78 Michael Hanselmann
    "INSTANCE_SECONDARIES": " ".join(secondary_nodes),
402 ecb215b5 Michael Hanselmann
    "INSTANCE_OS_TYPE": os_type,
403 396e1b78 Michael Hanselmann
    "INSTANCE_STATUS": status,
404 396e1b78 Michael Hanselmann
    "INSTANCE_MEMORY": memory,
405 396e1b78 Michael Hanselmann
    "INSTANCE_VCPUS": vcpus,
406 396e1b78 Michael Hanselmann
  }
407 396e1b78 Michael Hanselmann
408 396e1b78 Michael Hanselmann
  if nics:
409 396e1b78 Michael Hanselmann
    nic_count = len(nics)
410 53e4e875 Guido Trotter
    for idx, (ip, bridge, mac) in enumerate(nics):
411 396e1b78 Michael Hanselmann
      if ip is None:
412 396e1b78 Michael Hanselmann
        ip = ""
413 396e1b78 Michael Hanselmann
      env["INSTANCE_NIC%d_IP" % idx] = ip
414 396e1b78 Michael Hanselmann
      env["INSTANCE_NIC%d_BRIDGE" % idx] = bridge
415 53e4e875 Guido Trotter
      env["INSTANCE_NIC%d_HWADDR" % idx] = mac
416 396e1b78 Michael Hanselmann
  else:
417 396e1b78 Michael Hanselmann
    nic_count = 0
418 396e1b78 Michael Hanselmann
419 396e1b78 Michael Hanselmann
  env["INSTANCE_NIC_COUNT"] = nic_count
420 396e1b78 Michael Hanselmann
421 396e1b78 Michael Hanselmann
  return env
422 396e1b78 Michael Hanselmann
423 396e1b78 Michael Hanselmann
424 338e51e8 Iustin Pop
def _BuildInstanceHookEnvByObject(lu, instance, override=None):
  """Builds instance related env variables for hooks from an object.

  Args:
    instance: objects.Instance object of instance
    override: dict of values to override

  Returns:
    dict of hook environment entries (see _BuildInstanceHookEnv).

  """
  bep = lu.cfg.GetClusterInfo().FillBE(instance)
  args = {
    'name': instance.name,
    'primary_node': instance.primary_node,
    'secondary_nodes': instance.secondary_nodes,
    'os_type': instance.os,
    # FIX: this used to be instance.os (copy/paste of the line above), which
    # made INSTANCE_STATUS duplicate INSTANCE_OS_TYPE in the hook environment.
    # Use the instance's actual status instead.
    'status': instance.status,
    'memory': bep[constants.BE_MEMORY],
    'vcpus': bep[constants.BE_VCPUS],
    'nics': [(nic.ip, nic.bridge, nic.mac) for nic in instance.nics],
  }
  if override:
    args.update(override)
  return _BuildInstanceHookEnv(**args)
445 396e1b78 Michael Hanselmann
446 396e1b78 Michael Hanselmann
447 b9bddb6b Iustin Pop
def _CheckInstanceBridgesExist(lu, instance):
448 bf6929a2 Alexander Schreiber
  """Check that the brigdes needed by an instance exist.
449 bf6929a2 Alexander Schreiber

450 bf6929a2 Alexander Schreiber
  """
451 bf6929a2 Alexander Schreiber
  # check bridges existance
452 bf6929a2 Alexander Schreiber
  brlist = [nic.bridge for nic in instance.nics]
453 72737a7f Iustin Pop
  if not lu.rpc.call_bridges_exist(instance.primary_node, brlist):
454 bf6929a2 Alexander Schreiber
    raise errors.OpPrereqError("one or more target bridges %s does not"
455 bf6929a2 Alexander Schreiber
                               " exist on destination node '%s'" %
456 bf6929a2 Alexander Schreiber
                               (brlist, instance.primary_node))
457 bf6929a2 Alexander Schreiber
458 bf6929a2 Alexander Schreiber
459 a8083063 Iustin Pop
class LUDestroyCluster(NoHooksLU):
  """Logical unit for destroying the cluster.

  """
  _OP_REQP = []

  def CheckPrereq(self):
    """Check prerequisites.

    This checks whether the cluster is empty.

    Any errors are signalled by raising errors.OpPrereqError.

    """
    master = self.cfg.GetMasterNode()

    # The cluster is empty when the master is the only node left...
    nodelist = self.cfg.GetNodeList()
    if nodelist != [master]:
      raise errors.OpPrereqError("There are still %d node(s) in"
                                 " this cluster." % (len(nodelist) - 1))
    # ...and no instances are defined at all.
    instancelist = self.cfg.GetInstanceList()
    if instancelist:
      raise errors.OpPrereqError("There are still %d instance(s) in"
                                 " this cluster." % len(instancelist))

  def Exec(self, feedback_fn):
    """Destroys the cluster.

    Returns the name of the (former) master node so the caller can
    finish tearing it down.

    """
    master = self.cfg.GetMasterNode()
    if not self.rpc.call_node_stop_master(master, False):
      raise errors.OpExecError("Could not disable the master role")
    # Keep backups of the cluster SSH keys before they become unusable
    priv_key, pub_key, _ = ssh.GetUserFiles(constants.GANETI_RUNAS)
    for key_file in (priv_key, pub_key):
      utils.CreateBackup(key_file)
    return master
495 a8083063 Iustin Pop
496 a8083063 Iustin Pop
497 d8fff41c Guido Trotter
class LUVerifyCluster(LogicalUnit):
  """Verifies the cluster status.

  """
  HPATH = "cluster-verify"
  HTYPE = constants.HTYPE_CLUSTER
  _OP_REQP = ["skip_checks"]
  REQ_BGL = False

  def ExpandNames(self):
    """Acquire shared locks on every node and every instance."""
    self.needed_locks = {
      locking.LEVEL_NODE: locking.ALL_SET,
      locking.LEVEL_INSTANCE: locking.ALL_SET,
    }
    # all lock levels are taken in shared mode (value 1)
    self.share_locks = dict((level, 1) for level in locking.LEVELS)
512 a8083063 Iustin Pop
513 a8083063 Iustin Pop
  def _VerifyNode(self, node, file_list, local_cksum, vglist, node_result,
                  remote_version, feedback_fn):
    """Run multiple tests against a node.

    Test list:
      - compares ganeti version
      - checks vg existence and size > 20G
      - checks config file checksum
      - checks ssh to other nodes

    Args:
      node: name of the node to check
      file_list: required list of files
      local_cksum: dictionary of local files and their checksums
      vglist: volume group data as returned by the node, or a false value
      node_result: dict returned by the node_verify RPC for this node
      remote_version: protocol version reported by the node
      feedback_fn: function used to report each problem found

    Returns True if any problem was found (and reported via feedback_fn),
    False if the node passed all checks.

    """
    # compares ganeti version
    local_version = constants.PROTOCOL_VERSION
    if not remote_version:
      # no version at all means we never reached the node; nothing else
      # can be checked, report it as a single fatal error
      feedback_fn("  - ERROR: connection to %s failed" % (node))
      return True

    if local_version != remote_version:
      feedback_fn("  - ERROR: sw version mismatch: master %s, node(%s) %s" %
                      (local_version, node, remote_version))
      return True

    # checks vg existance and size > 20G

    bad = False
    if not vglist:
      feedback_fn("  - ERROR: unable to check volume groups on node %s." %
                      (node,))
      bad = True
    else:
      # CheckVolumeGroupSize returns an error string, or None when OK
      vgstatus = utils.CheckVolumeGroupSize(vglist, self.cfg.GetVGName(),
                                            constants.MIN_VG_SIZE)
      if vgstatus:
        feedback_fn("  - ERROR: %s on node %s" % (vgstatus, node))
        bad = True

    if not node_result:
      feedback_fn("  - ERROR: unable to verify node %s." % (node,))
      return True

    # checks config file checksum
    # checks ssh to any

    if 'filelist' not in node_result:
      bad = True
      feedback_fn("  - ERROR: node hasn't returned file checksum data")
    else:
      remote_cksum = node_result['filelist']
      for file_name in file_list:
        if file_name not in remote_cksum:
          bad = True
          feedback_fn("  - ERROR: file '%s' missing" % file_name)
        elif remote_cksum[file_name] != local_cksum[file_name]:
          bad = True
          feedback_fn("  - ERROR: file '%s' has wrong checksum" % file_name)

    if 'nodelist' not in node_result:
      bad = True
      feedback_fn("  - ERROR: node hasn't returned node ssh connectivity data")
    else:
      if node_result['nodelist']:
        bad = True
        # NOTE: the loop variable deliberately shadows the 'node' parameter;
        # here it names the peer node whose ssh check failed
        for node in node_result['nodelist']:
          feedback_fn("  - ERROR: ssh communication with node '%s': %s" %
                          (node, node_result['nodelist'][node]))
    if 'node-net-test' not in node_result:
      bad = True
      feedback_fn("  - ERROR: node hasn't returned node tcp connectivity data")
    else:
      if node_result['node-net-test']:
        bad = True
        nlist = utils.NiceSort(node_result['node-net-test'].keys())
        # same shadowing as above: 'node' is the peer that failed the tcp test
        for node in nlist:
          feedback_fn("  - ERROR: tcp communication with node '%s': %s" %
                          (node, node_result['node-net-test'][node]))

    # per-hypervisor verification results: a dict of hv name -> error
    # message (or None when that hypervisor verified cleanly)
    hyp_result = node_result.get('hypervisor', None)
    if isinstance(hyp_result, dict):
      for hv_name, hv_result in hyp_result.iteritems():
        if hv_result is not None:
          feedback_fn("  - ERROR: hypervisor %s verify failure: '%s'" %
                      (hv_name, hv_result))
    return bad
601 a8083063 Iustin Pop
602 c5705f58 Guido Trotter
  def _VerifyInstance(self, instance, instanceconfig, node_vol_is,
603 c5705f58 Guido Trotter
                      node_instance, feedback_fn):
604 a8083063 Iustin Pop
    """Verify an instance.
605 a8083063 Iustin Pop

606 a8083063 Iustin Pop
    This function checks to see if the required block devices are
607 a8083063 Iustin Pop
    available on the instance's node.
608 a8083063 Iustin Pop

609 a8083063 Iustin Pop
    """
610 a8083063 Iustin Pop
    bad = False
611 a8083063 Iustin Pop
612 a8083063 Iustin Pop
    node_current = instanceconfig.primary_node
613 a8083063 Iustin Pop
614 a8083063 Iustin Pop
    node_vol_should = {}
615 a8083063 Iustin Pop
    instanceconfig.MapLVsByNode(node_vol_should)
616 a8083063 Iustin Pop
617 a8083063 Iustin Pop
    for node in node_vol_should:
618 a8083063 Iustin Pop
      for volume in node_vol_should[node]:
619 a8083063 Iustin Pop
        if node not in node_vol_is or volume not in node_vol_is[node]:
620 a8083063 Iustin Pop
          feedback_fn("  - ERROR: volume %s missing on node %s" %
621 a8083063 Iustin Pop
                          (volume, node))
622 a8083063 Iustin Pop
          bad = True
623 a8083063 Iustin Pop
624 a8083063 Iustin Pop
    if not instanceconfig.status == 'down':
625 a872dae6 Guido Trotter
      if (node_current not in node_instance or
626 a872dae6 Guido Trotter
          not instance in node_instance[node_current]):
627 a8083063 Iustin Pop
        feedback_fn("  - ERROR: instance %s not running on node %s" %
628 a8083063 Iustin Pop
                        (instance, node_current))
629 a8083063 Iustin Pop
        bad = True
630 a8083063 Iustin Pop
631 a8083063 Iustin Pop
    for node in node_instance:
632 a8083063 Iustin Pop
      if (not node == node_current):
633 a8083063 Iustin Pop
        if instance in node_instance[node]:
634 a8083063 Iustin Pop
          feedback_fn("  - ERROR: instance %s should not run on node %s" %
635 a8083063 Iustin Pop
                          (instance, node))
636 a8083063 Iustin Pop
          bad = True
637 a8083063 Iustin Pop
638 6a438c98 Michael Hanselmann
    return bad
639 a8083063 Iustin Pop
640 a8083063 Iustin Pop
  def _VerifyOrphanVolumes(self, node_vol_should, node_vol_is, feedback_fn):
641 a8083063 Iustin Pop
    """Verify if there are any unknown volumes in the cluster.
642 a8083063 Iustin Pop

643 a8083063 Iustin Pop
    The .os, .swap and backup volumes are ignored. All other volumes are
644 a8083063 Iustin Pop
    reported as unknown.
645 a8083063 Iustin Pop

646 a8083063 Iustin Pop
    """
647 a8083063 Iustin Pop
    bad = False
648 a8083063 Iustin Pop
649 a8083063 Iustin Pop
    for node in node_vol_is:
650 a8083063 Iustin Pop
      for volume in node_vol_is[node]:
651 a8083063 Iustin Pop
        if node not in node_vol_should or volume not in node_vol_should[node]:
652 a8083063 Iustin Pop
          feedback_fn("  - ERROR: volume %s on node %s should not exist" %
653 a8083063 Iustin Pop
                      (volume, node))
654 a8083063 Iustin Pop
          bad = True
655 a8083063 Iustin Pop
    return bad
656 a8083063 Iustin Pop
657 a8083063 Iustin Pop
  def _VerifyOrphanInstances(self, instancelist, node_instance, feedback_fn):
658 a8083063 Iustin Pop
    """Verify the list of running instances.
659 a8083063 Iustin Pop

660 a8083063 Iustin Pop
    This checks what instances are running but unknown to the cluster.
661 a8083063 Iustin Pop

662 a8083063 Iustin Pop
    """
663 a8083063 Iustin Pop
    bad = False
664 a8083063 Iustin Pop
    for node in node_instance:
665 a8083063 Iustin Pop
      for runninginstance in node_instance[node]:
666 a8083063 Iustin Pop
        if runninginstance not in instancelist:
667 a8083063 Iustin Pop
          feedback_fn("  - ERROR: instance %s on node %s should not exist" %
668 a8083063 Iustin Pop
                          (runninginstance, node))
669 a8083063 Iustin Pop
          bad = True
670 a8083063 Iustin Pop
    return bad
671 a8083063 Iustin Pop
672 2b3b6ddd Guido Trotter
  def _VerifyNPlusOneMemory(self, node_info, instance_cfg, feedback_fn):
    """Verify N+1 Memory Resilience.

    For every (secondary node, primary node) pair, check that the
    secondary has enough free memory to start all the instances it would
    inherit should that primary node fail.

    Returns True if at least one node lacks the needed memory.

    """
    bad = False

    for node, nodeinfo in node_info.iteritems():
      # FIXME: not ready for failover to an arbitrary node
      # FIXME: does not support file-backed instances
      # WARNING: down instances are counted as well, since someone might
      # want to start them even in the event of a node failure.
      for prinode, instances in nodeinfo['sinst-by-pnode'].iteritems():
        mem_needed = 0
        for iname in instances:
          # only auto-balanced instances are expected to fail over
          bep = self.cfg.GetClusterInfo().FillBE(instance_cfg[iname])
          if bep[constants.BE_AUTO_BALANCE]:
            mem_needed += bep[constants.BE_MEMORY]
        if nodeinfo['mfree'] < mem_needed:
          feedback_fn("  - ERROR: not enough memory on node %s to accomodate"
                      " failovers should node %s fail" % (node, prinode))
          bad = True
    return bad
701 2b3b6ddd Guido Trotter
702 a8083063 Iustin Pop
  def CheckPrereq(self):
    """Check prerequisites.

    Turn the requested skip list into a frozenset and make sure every
    entry names a known optional check.

    """
    self.skip_set = frozenset(self.op.skip_checks)
    if not self.skip_set.issubset(constants.VERIFY_OPTIONAL_CHECKS):
      raise errors.OpPrereqError("Invalid checks to be skipped specified")
712 a8083063 Iustin Pop
713 d8fff41c Guido Trotter
  def BuildHooksEnv(self):
714 d8fff41c Guido Trotter
    """Build hooks env.
715 d8fff41c Guido Trotter

716 d8fff41c Guido Trotter
    Cluster-Verify hooks just rone in the post phase and their failure makes
717 d8fff41c Guido Trotter
    the output be logged in the verify output and the verification to fail.
718 d8fff41c Guido Trotter

719 d8fff41c Guido Trotter
    """
720 d8fff41c Guido Trotter
    all_nodes = self.cfg.GetNodeList()
721 d8fff41c Guido Trotter
    # TODO: populate the environment with useful information for verify hooks
722 d8fff41c Guido Trotter
    env = {}
723 d8fff41c Guido Trotter
    return env, [], all_nodes
724 d8fff41c Guido Trotter
725 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
    """Verify integrity of cluster, performing various test on nodes.

    Gathers node and instance data over RPC, runs the per-node,
    per-instance, orphan and (optionally) N+1 memory checks, and reports
    every problem through feedback_fn.

    Returns True if the cluster verified cleanly, False if any problem
    was found.

    """
    bad = False
    feedback_fn("* Verifying global settings")
    for msg in self.cfg.VerifyConfig():
      feedback_fn("  - ERROR: %s" % msg)

    vg_name = self.cfg.GetVGName()
    hypervisors = self.cfg.GetClusterInfo().enabled_hypervisors
    nodelist = utils.NiceSort(self.cfg.GetNodeList())
    nodeinfo = [self.cfg.GetNodeInfo(nname) for nname in nodelist]
    instancelist = utils.NiceSort(self.cfg.GetInstanceList())
    i_non_redundant = [] # Non redundant instances
    i_non_a_balanced = [] # Non auto-balanced instances
    node_volume = {}
    node_instance = {}
    node_info = {}
    instance_cfg = {}

    # FIXME: verify OS list
    # do local checksums
    file_names = []
    file_names.append(constants.SSL_CERT_FILE)
    file_names.append(constants.CLUSTER_CONF_FILE)
    local_checksums = utils.FingerprintFiles(file_names)

    # gather all per-node data in a single round of RPC calls
    feedback_fn("* Gathering data (%d nodes)" % len(nodelist))
    all_volumeinfo = self.rpc.call_volume_list(nodelist, vg_name)
    all_instanceinfo = self.rpc.call_instance_list(nodelist, hypervisors)
    all_vglist = self.rpc.call_vg_list(nodelist)
    node_verify_param = {
      'filelist': file_names,
      'nodelist': nodelist,
      'hypervisor': hypervisors,
      'node-net-test': [(node.name, node.primary_ip, node.secondary_ip)
                        for node in nodeinfo]
      }
    all_nvinfo = self.rpc.call_node_verify(nodelist, node_verify_param,
                                           self.cfg.GetClusterName())
    all_rversion = self.rpc.call_version(nodelist)
    all_ninfo = self.rpc.call_node_info(nodelist, self.cfg.GetVGName(),
                                        self.cfg.GetHypervisorType())

    cluster = self.cfg.GetClusterInfo()
    # first pass: verify each node and collect its volume, instance and
    # memory/disk information for the later per-instance checks
    for node in nodelist:
      feedback_fn("* Verifying node %s" % node)
      result = self._VerifyNode(node, file_names, local_checksums,
                                all_vglist[node], all_nvinfo[node],
                                all_rversion[node], feedback_fn)
      bad = bad or result

      # node_volume
      volumeinfo = all_volumeinfo[node]

      if isinstance(volumeinfo, basestring):
        # a string result means the node reported an LVM-level error
        feedback_fn("  - ERROR: LVM problem on node %s: %s" %
                    (node, volumeinfo[-400:].encode('string_escape')))
        bad = True
        node_volume[node] = {}
      elif not isinstance(volumeinfo, dict):
        feedback_fn("  - ERROR: connection to %s failed" % (node,))
        bad = True
        continue
      else:
        node_volume[node] = volumeinfo

      # node_instance
      nodeinstance = all_instanceinfo[node]
      if type(nodeinstance) != list:
        feedback_fn("  - ERROR: connection to %s failed" % (node,))
        bad = True
        continue

      node_instance[node] = nodeinstance

      # node_info
      nodeinfo = all_ninfo[node]
      if not isinstance(nodeinfo, dict):
        feedback_fn("  - ERROR: connection to %s failed" % (node,))
        bad = True
        continue

      try:
        node_info[node] = {
          "mfree": int(nodeinfo['memory_free']),
          "dfree": int(nodeinfo['vg_free']),
          "pinst": [],
          "sinst": [],
          # dictionary holding all instances this node is secondary for,
          # grouped by their primary node. Each key is a cluster node, and each
          # value is a list of instances which have the key as primary and the
          # current node as secondary.  this is handy to calculate N+1 memory
          # availability if you can only failover from a primary to its
          # secondary.
          "sinst-by-pnode": {},
        }
      except ValueError:
        feedback_fn("  - ERROR: invalid value returned from node %s" % (node,))
        bad = True
        continue

    node_vol_should = {}

    # second pass: verify each instance and register it on its primary
    # and secondary nodes for the N+1 memory check
    for instance in instancelist:
      feedback_fn("* Verifying instance %s" % instance)
      inst_config = self.cfg.GetInstanceInfo(instance)
      result =  self._VerifyInstance(instance, inst_config, node_volume,
                                     node_instance, feedback_fn)
      bad = bad or result

      inst_config.MapLVsByNode(node_vol_should)

      instance_cfg[instance] = inst_config

      pnode = inst_config.primary_node
      if pnode in node_info:
        node_info[pnode]['pinst'].append(instance)
      else:
        feedback_fn("  - ERROR: instance %s, connection to primary node"
                    " %s failed" % (instance, pnode))
        bad = True

      # If the instance is non-redundant we cannot survive losing its primary
      # node, so we are not N+1 compliant. On the other hand we have no disk
      # templates with more than one secondary so that situation is not well
      # supported either.
      # FIXME: does not support file-backed instances
      if len(inst_config.secondary_nodes) == 0:
        i_non_redundant.append(instance)
      elif len(inst_config.secondary_nodes) > 1:
        feedback_fn("  - WARNING: multiple secondaries for instance %s"
                    % instance)

      if not cluster.FillBE(inst_config)[constants.BE_AUTO_BALANCE]:
        i_non_a_balanced.append(instance)

      for snode in inst_config.secondary_nodes:
        if snode in node_info:
          node_info[snode]['sinst'].append(instance)
          if pnode not in node_info[snode]['sinst-by-pnode']:
            node_info[snode]['sinst-by-pnode'][pnode] = []
          node_info[snode]['sinst-by-pnode'][pnode].append(instance)
        else:
          feedback_fn("  - ERROR: instance %s, connection to secondary node"
                      " %s failed" % (instance, snode))

    feedback_fn("* Verifying orphan volumes")
    result = self._VerifyOrphanVolumes(node_vol_should, node_volume,
                                       feedback_fn)
    bad = bad or result

    feedback_fn("* Verifying remaining instances")
    result = self._VerifyOrphanInstances(instancelist, node_instance,
                                         feedback_fn)
    bad = bad or result

    if constants.VERIFY_NPLUSONE_MEM not in self.skip_set:
      feedback_fn("* Verifying N+1 Memory redundancy")
      result = self._VerifyNPlusOneMemory(node_info, instance_cfg, feedback_fn)
      bad = bad or result

    feedback_fn("* Other Notes")
    if i_non_redundant:
      feedback_fn("  - NOTICE: %d non-redundant instance(s) found."
                  % len(i_non_redundant))

    if i_non_a_balanced:
      feedback_fn("  - NOTICE: %d non-auto-balanced instance(s) found."
                  % len(i_non_a_balanced))

    return not bad
898 a8083063 Iustin Pop
899 d8fff41c Guido Trotter
  def HooksCallBack(self, phase, hooks_results, feedback_fn, lu_result):
    """Analyze the post-hooks' result, handle it, and send some
    nicely-formatted feedback back to the user.

    Args:
      phase: the hooks phase that has just been run
      hooks_results: the results of the multi-node hooks rpc call
      feedback_fn: function to send feedback back to the caller
      lu_result: previous Exec result

    """
    # We only really run POST phase hooks, and are only interested in
    # their results
    if phase != constants.HOOKS_PHASE_POST:
      # other phases are ignored (returns None, as before)
      return None

    # Used to change hooks' output to proper indentation
    indent_re = re.compile('^', re.M)
    feedback_fn("* Hooks Results")
    if not hooks_results:
      feedback_fn("  - ERROR: general communication failure")
      return 1

    for node_name, res in hooks_results.items():
      header_pending = True
      if res is False or not isinstance(res, list):
        feedback_fn("    Communication failure")
        lu_result = 1
        continue
      for script, hkr, output in res:
        if hkr != constants.HKR_FAIL:
          continue
        # The node header is only shown once, if there are
        # failing hooks on that node
        if header_pending:
          feedback_fn("  Node %s:" % node_name)
          header_pending = False
        feedback_fn("    ERROR: Script %s failed, output:" % script)
        output = indent_re.sub('      ', output)
        feedback_fn("%s" % output)
        lu_result = 1

    return lu_result
940 d8fff41c Guido Trotter
941 a8083063 Iustin Pop
942 2c95a8d4 Iustin Pop
class LUVerifyDisks(NoHooksLU):
  """Verifies the cluster disks status.

  """
  _OP_REQP = []
  REQ_BGL = False

  def ExpandNames(self):
    """Acquire shared locks on all nodes and all instances."""
    self.needed_locks = {
      locking.LEVEL_NODE: locking.ALL_SET,
      locking.LEVEL_INSTANCE: locking.ALL_SET,
    }
    # take every lock level in shared mode
    self.share_locks = dict.fromkeys(locking.LEVELS, 1)
955 2c95a8d4 Iustin Pop
956 2c95a8d4 Iustin Pop
  def CheckPrereq(self):
    """Check prerequisites.

    Verifying the cluster disks has no prerequisites, so this is
    deliberately a no-op.

    """
963 2c95a8d4 Iustin Pop
964 2c95a8d4 Iustin Pop
  def Exec(self, feedback_fn):
    """Verify integrity of cluster disks.

    Returns a 4-tuple: (list of nodes that could not be contacted,
    dict of node -> LVM error string, list of instance names with
    offline logical volumes, dict of instance name -> list of
    (node, volume) pairs that are missing).

    """
    # 'result' aliases the four per-category containers so they can be
    # filled individually and returned together
    result = res_nodes, res_nlvm, res_instances, res_missing = [], {}, [], {}

    vg_name = self.cfg.GetVGName()
    nodes = utils.NiceSort(self.cfg.GetNodeList())
    instances = [self.cfg.GetInstanceInfo(name)
                 for name in self.cfg.GetInstanceList()]

    nv_dict = {}
    for inst in instances:
      inst_lvs = {}
      # only running, network-mirrored instances are relevant here
      if (inst.status != "up" or
          inst.disk_template not in constants.DTS_NET_MIRROR):
        continue
      inst.MapLVsByNode(inst_lvs)
      # transform { iname: {node: [vol,],},} to {(node, vol): iname}
      for node, vol_list in inst_lvs.iteritems():
        for vol in vol_list:
          nv_dict[(node, vol)] = inst

    if not nv_dict:
      return result

    node_lvs = self.rpc.call_volume_list(nodes, vg_name)

    for node in nodes:
      # node_volume
      lvs = node_lvs[node]

      if isinstance(lvs, basestring):
        logger.Info("error enumerating LVs on node %s: %s" % (node, lvs))
        res_nlvm[node] = lvs
        # FIX: 'lvs' is an error string here, not a dict; without this
        # 'continue' the iteritems() call below raised AttributeError
        continue
      elif not isinstance(lvs, dict):
        logger.Info("connection to node %s failed or invalid data returned" %
                    (node,))
        res_nodes.append(node)
        continue

      for lv_name, (_, lv_inactive, lv_online) in lvs.iteritems():
        # pop so that anything left in nv_dict afterwards is missing
        inst = nv_dict.pop((node, lv_name), None)
        if (not lv_online and inst is not None
            and inst.name not in res_instances):
          res_instances.append(inst.name)

    # any leftover items in nv_dict are missing LVs, let's arrange the
    # data better
    for key, inst in nv_dict.iteritems():
      if inst.name not in res_missing:
        res_missing[inst.name] = []
      res_missing[inst.name].append(key)

    return result
1020 2c95a8d4 Iustin Pop
1021 2c95a8d4 Iustin Pop
1022 07bd8a51 Iustin Pop
class LURenameCluster(LogicalUnit):
  """Rename the cluster.

  Changes the cluster name and/or the master IP address; the hooks for
  this operation run on the master node only.

  """
  HPATH = "cluster-rename"
  HTYPE = constants.HTYPE_CLUSTER
  _OP_REQP = ["name"]

  def BuildHooksEnv(self):
    """Build hooks env.

    Exposes the current cluster name (OP_TARGET) and the requested new
    name (NEW_NAME); both pre and post hooks run on the master only.

    """
    env = {
      "OP_TARGET": self.cfg.GetClusterName(),
      "NEW_NAME": self.op.name,
      }
    mn = self.cfg.GetMasterNode()
    return env, [mn], [mn]

  def CheckPrereq(self):
    """Verify that the passed name is a valid one.

    Resolves the new name and refuses the operation when neither the
    name nor the IP address would change, or when the new IP already
    answers on the network.

    """
    hostname = utils.HostInfo(self.op.name)

    new_name = hostname.name
    # keep the resolved IP around for Exec()
    self.ip = new_ip = hostname.ip
    old_name = self.cfg.GetClusterName()
    old_ip = self.cfg.GetMasterIP()
    if new_name == old_name and new_ip == old_ip:
      raise errors.OpPrereqError("Neither the name nor the IP address of the"
                                 " cluster has changed")
    if new_ip != old_ip:
      # refuse a new IP that is already live somewhere on the network
      if utils.TcpPing(new_ip, constants.DEFAULT_NODED_PORT):
        raise errors.OpPrereqError("The given cluster IP address (%s) is"
                                   " reachable on the network. Aborting." %
                                   new_ip)

    self.op.name = new_name

  def Exec(self, feedback_fn):
    """Rename the cluster.

    Stops the master role, rewrites the cluster name/IP keys and pushes
    them to all other nodes, then restarts the master role; the restart
    is attempted even if the update itself fails.

    """
    clustername = self.op.name
    ip = self.ip

    # shutdown the master IP
    master = self.cfg.GetMasterNode()
    if not self.rpc.call_node_stop_master(master, False):
      raise errors.OpExecError("Could not disable the master role")

    try:
      # modify the sstore
      # TODO: sstore
      # NOTE(review): "ss" is not defined anywhere in this module; this
      # section still needs porting away from the old sstore interface
      # (as the TODO above suggests) and would raise NameError if
      # executed as-is -- confirm and fix
      ss.SetKey(ss.SS_MASTER_IP, ip)
      ss.SetKey(ss.SS_CLUSTER_NAME, clustername)

      # Distribute updated ss config to all nodes
      myself = self.cfg.GetNodeInfo(master)
      dist_nodes = self.cfg.GetNodeList()
      if myself.name in dist_nodes:
        dist_nodes.remove(myself.name)

      logger.Debug("Copying updated ssconf data to all nodes")
      for keyname in [ss.SS_CLUSTER_NAME, ss.SS_MASTER_IP]:
        fname = ss.KeyToFilename(keyname)
        result = self.rpc.call_upload_file(dist_nodes, fname)
        for to_node in dist_nodes:
          if not result[to_node]:
            # log but don't abort: the local update already happened
            logger.Error("copy of file %s to node %s failed" %
                         (fname, to_node))
    finally:
      # always try to restore the master role
      if not self.rpc.call_node_start_master(master, False):
        logger.Error("Could not re-enable the master role on the master,"
                     " please restart manually.")
1098 07bd8a51 Iustin Pop
1099 07bd8a51 Iustin Pop
1100 8084f9f6 Manuel Franceschini
def _RecursiveCheckIfLVMBased(disk):
  """Check if the given disk or its children are lvm-based.

  Args:
    disk: ganeti.objects.Disk object

  Returns:
    boolean indicating whether a LD_LV dev_type was found or not

  """
  # the disk itself counts...
  if disk.dev_type == constants.LD_LV:
    return True
  # ...and so does any LVM-based device anywhere below it
  for child in disk.children or []:
    if _RecursiveCheckIfLVMBased(child):
      return True
  return False
1115 8084f9f6 Manuel Franceschini
1116 8084f9f6 Manuel Franceschini
1117 8084f9f6 Manuel Franceschini
class LUSetClusterParams(LogicalUnit):
  """Change the parameters of the cluster.

  Currently only the volume group name can be changed (or unset, which
  disables LVM-based storage).

  """
  HPATH = "cluster-modify"
  HTYPE = constants.HTYPE_CLUSTER
  _OP_REQP = []
  REQ_BGL = False

  def ExpandNames(self):
    # FIXME: in the future maybe other cluster params won't require checking on
    # all nodes to be modified.
    self.needed_locks = {
      locking.LEVEL_NODE: locking.ALL_SET,
    }
    # shared locks are sufficient: the nodes are only inspected here
    self.share_locks[locking.LEVEL_NODE] = 1

  def BuildHooksEnv(self):
    """Build hooks env.

    The hooks run on the master node only and receive the (possibly
    empty) new volume group name.

    """
    env = {
      "OP_TARGET": self.cfg.GetClusterName(),
      "NEW_VG_NAME": self.op.vg_name,
      }
    mn = self.cfg.GetMasterNode()
    return env, [mn], [mn]

  def CheckPrereq(self):
    """Check prerequisites.

    This checks whether the given params don't conflict and
    if the given volume group is valid.

    """
    # FIXME: This only works because there is only one parameter that can be
    # changed or removed.
    if not self.op.vg_name:
      # disabling LVM: refuse if any instance still has LVM-backed disks
      instances = self.cfg.GetAllInstancesInfo().values()
      for inst in instances:
        for disk in inst.disks:
          if _RecursiveCheckIfLVMBased(disk):
            raise errors.OpPrereqError("Cannot disable lvm storage while"
                                       " lvm-based instances exist")

    # if vg_name not None, checks given volume group on all nodes
    if self.op.vg_name:
      node_list = self.acquired_locks[locking.LEVEL_NODE]
      vglist = self.rpc.call_vg_list(node_list)
      for node in node_list:
        # NOTE(review): if a node failed to answer it might be missing
        # from vglist, making this a KeyError rather than a clean
        # OpPrereqError -- confirm call_vg_list returns a key per node
        vgstatus = utils.CheckVolumeGroupSize(vglist[node], self.op.vg_name,
                                              constants.MIN_VG_SIZE)
        if vgstatus:
          raise errors.OpPrereqError("Error on node '%s': %s" %
                                     (node, vgstatus))

  def Exec(self, feedback_fn):
    """Change the parameters of the cluster.

    """
    # only touch the configuration when the value actually changes
    if self.op.vg_name != self.cfg.GetVGName():
      self.cfg.SetVGName(self.op.vg_name)
    else:
      feedback_fn("Cluster LVM configuration already in desired"
                  " state, not changing")
1182 8084f9f6 Manuel Franceschini
1183 8084f9f6 Manuel Franceschini
1184 b9bddb6b Iustin Pop
def _WaitForSync(lu, instance, oneshot=False, unlock=False):
1185 a8083063 Iustin Pop
  """Sleep and poll for an instance's disk to sync.
1186 a8083063 Iustin Pop

1187 a8083063 Iustin Pop
  """
1188 a8083063 Iustin Pop
  if not instance.disks:
1189 a8083063 Iustin Pop
    return True
1190 a8083063 Iustin Pop
1191 a8083063 Iustin Pop
  if not oneshot:
1192 b9bddb6b Iustin Pop
    lu.proc.LogInfo("Waiting for instance %s to sync disks." % instance.name)
1193 a8083063 Iustin Pop
1194 a8083063 Iustin Pop
  node = instance.primary_node
1195 a8083063 Iustin Pop
1196 a8083063 Iustin Pop
  for dev in instance.disks:
1197 b9bddb6b Iustin Pop
    lu.cfg.SetDiskID(dev, node)
1198 a8083063 Iustin Pop
1199 a8083063 Iustin Pop
  retries = 0
1200 a8083063 Iustin Pop
  while True:
1201 a8083063 Iustin Pop
    max_time = 0
1202 a8083063 Iustin Pop
    done = True
1203 a8083063 Iustin Pop
    cumul_degraded = False
1204 72737a7f Iustin Pop
    rstats = lu.rpc.call_blockdev_getmirrorstatus(node, instance.disks)
1205 a8083063 Iustin Pop
    if not rstats:
1206 b9bddb6b Iustin Pop
      lu.proc.LogWarning("Can't get any data from node %s" % node)
1207 a8083063 Iustin Pop
      retries += 1
1208 a8083063 Iustin Pop
      if retries >= 10:
1209 3ecf6786 Iustin Pop
        raise errors.RemoteError("Can't contact node %s for mirror data,"
1210 3ecf6786 Iustin Pop
                                 " aborting." % node)
1211 a8083063 Iustin Pop
      time.sleep(6)
1212 a8083063 Iustin Pop
      continue
1213 a8083063 Iustin Pop
    retries = 0
1214 a8083063 Iustin Pop
    for i in range(len(rstats)):
1215 a8083063 Iustin Pop
      mstat = rstats[i]
1216 a8083063 Iustin Pop
      if mstat is None:
1217 b9bddb6b Iustin Pop
        lu.proc.LogWarning("Can't compute data for node %s/%s" %
1218 b9bddb6b Iustin Pop
                           (node, instance.disks[i].iv_name))
1219 a8083063 Iustin Pop
        continue
1220 0834c866 Iustin Pop
      # we ignore the ldisk parameter
1221 0834c866 Iustin Pop
      perc_done, est_time, is_degraded, _ = mstat
1222 a8083063 Iustin Pop
      cumul_degraded = cumul_degraded or (is_degraded and perc_done is None)
1223 a8083063 Iustin Pop
      if perc_done is not None:
1224 a8083063 Iustin Pop
        done = False
1225 a8083063 Iustin Pop
        if est_time is not None:
1226 a8083063 Iustin Pop
          rem_time = "%d estimated seconds remaining" % est_time
1227 a8083063 Iustin Pop
          max_time = est_time
1228 a8083063 Iustin Pop
        else:
1229 a8083063 Iustin Pop
          rem_time = "no time estimate"
1230 b9bddb6b Iustin Pop
        lu.proc.LogInfo("- device %s: %5.2f%% done, %s" %
1231 b9bddb6b Iustin Pop
                        (instance.disks[i].iv_name, perc_done, rem_time))
1232 a8083063 Iustin Pop
    if done or oneshot:
1233 a8083063 Iustin Pop
      break
1234 a8083063 Iustin Pop
1235 d4fa5c23 Iustin Pop
    time.sleep(min(60, max_time))
1236 a8083063 Iustin Pop
1237 a8083063 Iustin Pop
  if done:
1238 b9bddb6b Iustin Pop
    lu.proc.LogInfo("Instance %s's disks are in sync." % instance.name)
1239 a8083063 Iustin Pop
  return not cumul_degraded
1240 a8083063 Iustin Pop
1241 a8083063 Iustin Pop
1242 b9bddb6b Iustin Pop
def _CheckDiskConsistency(lu, dev, node, on_primary, ldisk=False):
1243 a8083063 Iustin Pop
  """Check that mirrors are not degraded.
1244 a8083063 Iustin Pop

1245 0834c866 Iustin Pop
  The ldisk parameter, if True, will change the test from the
1246 0834c866 Iustin Pop
  is_degraded attribute (which represents overall non-ok status for
1247 0834c866 Iustin Pop
  the device(s)) to the ldisk (representing the local storage status).
1248 0834c866 Iustin Pop

1249 a8083063 Iustin Pop
  """
1250 b9bddb6b Iustin Pop
  lu.cfg.SetDiskID(dev, node)
1251 0834c866 Iustin Pop
  if ldisk:
1252 0834c866 Iustin Pop
    idx = 6
1253 0834c866 Iustin Pop
  else:
1254 0834c866 Iustin Pop
    idx = 5
1255 a8083063 Iustin Pop
1256 a8083063 Iustin Pop
  result = True
1257 a8083063 Iustin Pop
  if on_primary or dev.AssembleOnSecondary():
1258 72737a7f Iustin Pop
    rstats = lu.rpc.call_blockdev_find(node, dev)
1259 a8083063 Iustin Pop
    if not rstats:
1260 aa9d0c32 Guido Trotter
      logger.ToStderr("Node %s: Disk degraded, not found or node down" % node)
1261 a8083063 Iustin Pop
      result = False
1262 a8083063 Iustin Pop
    else:
1263 0834c866 Iustin Pop
      result = result and (not rstats[idx])
1264 a8083063 Iustin Pop
  if dev.children:
1265 a8083063 Iustin Pop
    for child in dev.children:
1266 b9bddb6b Iustin Pop
      result = result and _CheckDiskConsistency(lu, child, node, on_primary)
1267 a8083063 Iustin Pop
1268 a8083063 Iustin Pop
  return result
1269 a8083063 Iustin Pop
1270 a8083063 Iustin Pop
1271 a8083063 Iustin Pop
class LUDiagnoseOS(NoHooksLU):
  """Logical unit for OS diagnose/query.

  """
  _OP_REQP = ["output_fields", "names"]
  REQ_BGL = False

  def ExpandNames(self):
    if self.op.names:
      raise errors.OpPrereqError("Selective OS query not supported")

    self.dynamic_fields = frozenset(["name", "valid", "node_status"])
    _CheckOutputFields(static=[],
                       dynamic=self.dynamic_fields,
                       selected=self.op.output_fields)

    # lock all nodes, in shared mode
    self.needed_locks = {locking.LEVEL_NODE: locking.ALL_SET}
    self.share_locks[locking.LEVEL_NODE] = 1

  def CheckPrereq(self):
    """Check prerequisites.

    """

  @staticmethod
  def _DiagnoseByOS(node_list, rlist):
    """Remap a per-node result list into a per-os, per-node dictionary.

      Args:
        node_list: a list with the names of all nodes
        rlist: a map with node names as keys and OS objects as values

      Returns:
        a map with OS names as keys; each value is another map from node
        name to the list of OS objects found there, e.g.
          {"debian-etch": {"node1": [<object>,...],
                           "node2": [<object>,]}
          }

    """
    by_os = {}
    for node_name, node_result in rlist.iteritems():
      if not node_result:
        # no data from this node, skip it
        continue
      for os_obj in node_result:
        if os_obj.name not in by_os:
          # first occurrence of this OS: seed an empty list for every
          # known node, so nodes missing it are explicit
          by_os[os_obj.name] = dict([(nname, []) for nname in node_list])
        by_os[os_obj.name][node_name].append(os_obj)
    return by_os

  def Exec(self, feedback_fn):
    """Compute the list of OSes.

    """
    node_list = self.acquired_locks[locking.LEVEL_NODE]
    node_data = self.rpc.call_os_diagnose(node_list)
    if node_data == False:
      raise errors.OpExecError("Can't gather the list of OSes")
    os_map = self._DiagnoseByOS(node_list, node_data)
    output = []
    for os_name, nodes_data in os_map.iteritems():
      row = []
      for field in self.op.output_fields:
        if field == "name":
          entry = os_name
        elif field == "valid":
          entry = utils.all([osl and osl[0] for osl in nodes_data.values()])
        elif field == "node_status":
          entry = {}
          for node_name, nos_list in nodes_data.iteritems():
            entry[node_name] = [(v.status, v.path) for v in nos_list]
        else:
          raise errors.ParameterError(field)
        row.append(entry)
      output.append(row)

    return output
1355 a8083063 Iustin Pop
1356 a8083063 Iustin Pop
1357 a8083063 Iustin Pop
class LURemoveNode(LogicalUnit):
  """Logical unit for removing a node.

  """
  HPATH = "node-remove"
  HTYPE = constants.HTYPE_NODE
  _OP_REQP = ["node_name"]

  def BuildHooksEnv(self):
    """Build hooks env.

    This doesn't run on the target node in the pre phase as a failed
    node would then be impossible to remove.

    """
    env = {
      "OP_TARGET": self.op.node_name,
      "NODE_NAME": self.op.node_name,
      }
    all_nodes = self.cfg.GetNodeList()
    # run the hooks on all nodes except the one being removed
    all_nodes.remove(self.op.node_name)
    return env, all_nodes, all_nodes

  def CheckPrereq(self):
    """Check prerequisites.

    This checks:
     - the node exists in the configuration
     - it does not have primary or secondary instances
     - it's not the master

    Any errors are signalled by raising errors.OpPrereqError.

    """
    node = self.cfg.GetNodeInfo(self.cfg.ExpandNodeName(self.op.node_name))
    if node is None:
      # fixed: this used the deprecated two-argument raise statement
      # ("raise Class, arg"), inconsistent with the rest of this file
      # and invalid in Python 3; the call form below is equivalent
      raise errors.OpPrereqError("Node '%s' is unknown." % self.op.node_name)

    instance_list = self.cfg.GetInstanceList()

    masternode = self.cfg.GetMasterNode()
    if node.name == masternode:
      raise errors.OpPrereqError("Node is the master node,"
                                 " you need to failover first.")

    for instance_name in instance_list:
      instance = self.cfg.GetInstanceInfo(instance_name)
      if node.name == instance.primary_node:
        raise errors.OpPrereqError("Instance %s still running on the node,"
                                   " please remove first." % instance_name)
      if node.name in instance.secondary_nodes:
        raise errors.OpPrereqError("Instance %s has node as a secondary,"
                                   " please remove first." % instance_name)
    self.op.node_name = node.name
    self.node = node

  def Exec(self, feedback_fn):
    """Removes the node from the cluster.

    """
    node = self.node
    logger.Info("stopping the node daemon and removing configs from node %s" %
                node.name)

    # remove the node from the cluster configuration/context first...
    self.context.RemoveNode(node.name)

    # ...then ask the node itself to leave; the return value is not
    # checked here (best-effort)
    self.rpc.call_node_leave_cluster(node.name)
1424 c8a0948f Michael Hanselmann
1425 a8083063 Iustin Pop
1426 a8083063 Iustin Pop
class LUQueryNodes(NoHooksLU):
  """Logical unit for querying nodes.

  Returns one row per requested node, with one column per requested
  output field; "dynamic" fields are gathered live via RPC, "static"
  ones come from the configuration.

  """
  _OP_REQP = ["output_fields", "names"]
  REQ_BGL = False

  def ExpandNames(self):
    # fields that require a live RPC query to the nodes
    self.dynamic_fields = frozenset([
      "dtotal", "dfree",
      "mtotal", "mnode", "mfree",
      "bootid",
      "ctotal",
      ])

    # fields answerable from the configuration alone
    self.static_fields = frozenset([
      "name", "pinst_cnt", "sinst_cnt",
      "pinst_list", "sinst_list",
      "pip", "sip", "tags",
      "serial_no",
      ])

    _CheckOutputFields(static=self.static_fields,
                       dynamic=self.dynamic_fields,
                       selected=self.op.output_fields)

    self.needed_locks = {}
    self.share_locks[locking.LEVEL_NODE] = 1

    if self.op.names:
      self.wanted = _GetWantedNodes(self, self.op.names)
    else:
      self.wanted = locking.ALL_SET

    self.do_locking = not self.static_fields.issuperset(self.op.output_fields)
    if self.do_locking:
      # if we don't request only static fields, we need to lock the nodes
      self.needed_locks[locking.LEVEL_NODE] = self.wanted


  def CheckPrereq(self):
    """Check prerequisites.

    """
    # The validation of the node list is done in the _GetWantedNodes,
    # if non empty, and if empty, there's no validation to do
    pass

  def Exec(self, feedback_fn):
    """Computes the list of nodes and their attributes.

    """
    all_info = self.cfg.GetAllNodesInfo()
    if self.do_locking:
      # locked query: report exactly the nodes we hold locks on
      nodenames = self.acquired_locks[locking.LEVEL_NODE]
    elif self.wanted != locking.ALL_SET:
      # unlocked query with an explicit name list: the nodes may have
      # been removed meanwhile, so double-check
      nodenames = self.wanted
      missing = set(nodenames).difference(all_info.keys())
      if missing:
        raise errors.OpExecError(
          "Some nodes were removed before retrieving their data: %s" % missing)
    else:
      nodenames = all_info.keys()

    nodenames = utils.NiceSort(nodenames)
    nodelist = [all_info[name] for name in nodenames]

    # begin data gathering

    if self.dynamic_fields.intersection(self.op.output_fields):
      # at least one live field requested: query the nodes via RPC
      live_data = {}
      node_data = self.rpc.call_node_info(nodenames, self.cfg.GetVGName(),
                                          self.cfg.GetHypervisorType())
      for name in nodenames:
        nodeinfo = node_data.get(name, None)
        if nodeinfo:
          live_data[name] = {
            "mtotal": utils.TryConvert(int, nodeinfo['memory_total']),
            "mnode": utils.TryConvert(int, nodeinfo['memory_dom0']),
            "mfree": utils.TryConvert(int, nodeinfo['memory_free']),
            "dtotal": utils.TryConvert(int, nodeinfo['vg_size']),
            "dfree": utils.TryConvert(int, nodeinfo['vg_free']),
            "ctotal": utils.TryConvert(int, nodeinfo['cpu_total']),
            "bootid": nodeinfo['bootid'],
            }
        else:
          # no (valid) answer from this node: empty live data
          live_data[name] = {}
    else:
      # note: all names map to the same shared empty dict; safe since
      # it is only ever read below
      live_data = dict.fromkeys(nodenames, {})

    # per-node sets of primary/secondary instance names
    node_to_primary = dict([(name, set()) for name in nodenames])
    node_to_secondary = dict([(name, set()) for name in nodenames])

    inst_fields = frozenset(("pinst_cnt", "pinst_list",
                             "sinst_cnt", "sinst_list"))
    if inst_fields & frozenset(self.op.output_fields):
      # only walk the instance list if an instance field was requested
      instancelist = self.cfg.GetInstanceList()

      for instance_name in instancelist:
        inst = self.cfg.GetInstanceInfo(instance_name)
        if inst.primary_node in node_to_primary:
          node_to_primary[inst.primary_node].add(inst.name)
        for secnode in inst.secondary_nodes:
          if secnode in node_to_secondary:
            node_to_secondary[secnode].add(inst.name)

    # end data gathering

    output = []
    for node in nodelist:
      node_output = []
      for field in self.op.output_fields:
        if field == "name":
          val = node.name
        elif field == "pinst_list":
          val = list(node_to_primary[node.name])
        elif field == "sinst_list":
          val = list(node_to_secondary[node.name])
        elif field == "pinst_cnt":
          val = len(node_to_primary[node.name])
        elif field == "sinst_cnt":
          val = len(node_to_secondary[node.name])
        elif field == "pip":
          val = node.primary_ip
        elif field == "sip":
          val = node.secondary_ip
        elif field == "tags":
          val = list(node.GetTags())
        elif field == "serial_no":
          val = node.serial_no
        elif field in self.dynamic_fields:
          # missing live values are reported as None
          val = live_data[node.name].get(field, None)
        else:
          raise errors.ParameterError(field)
        node_output.append(val)
      output.append(node_output)

    return output
1564 a8083063 Iustin Pop
1565 a8083063 Iustin Pop
1566 dcb93971 Michael Hanselmann
class LUQueryNodeVolumes(NoHooksLU):
  """Logical unit for getting volumes on node(s).

  """
  _OP_REQP = ["nodes", "output_fields"]
  REQ_BGL = False

  def ExpandNames(self):
    # validate the requested fields before acquiring any locks
    _CheckOutputFields(static=["node"],
                       dynamic=["phys", "vg", "name", "size", "instance"],
                       selected=self.op.output_fields)

    self.needed_locks = {}
    self.share_locks[locking.LEVEL_NODE] = 1
    if self.op.nodes:
      self.needed_locks[locking.LEVEL_NODE] = \
        _GetWantedNodes(self, self.op.nodes)
    else:
      # no explicit node list means "all nodes"
      self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the fields required are valid output fields.

    """
    self.nodes = self.acquired_locks[locking.LEVEL_NODE]

  def Exec(self, feedback_fn):
    """Computes the list of nodes and their attributes.

    """
    node_names = self.nodes
    volumes = self.rpc.call_node_volumes(node_names)

    instances = [self.cfg.GetInstanceInfo(iname)
                 for iname in self.cfg.GetInstanceList()]
    # map each instance object to its {node: [lv names, ...]} layout
    lv_by_node = dict((inst, inst.MapLVsByNode()) for inst in instances)

    output = []
    for node in node_names:
      node_vols = volumes.get(node)
      if not node_vols:
        # node unreachable or has no volumes; skip it silently
        continue

      # sort on a copy so the RPC result is not mutated
      node_vols = node_vols[:]
      node_vols.sort(key=lambda vol: vol['dev'])

      for vol in node_vols:
        row = []
        for field in self.op.output_fields:
          if field == "node":
            val = node
          elif field == "phys":
            val = vol['dev']
          elif field == "vg":
            val = vol['vg']
          elif field == "name":
            val = vol['name']
          elif field == "size":
            val = int(float(vol['size']))
          elif field == "instance":
            # find which instance (if any) owns this LV on this node
            val = '-'
            for inst in instances:
              node_lvs = lv_by_node[inst].get(node)
              if node_lvs and vol['name'] in node_lvs:
                val = inst.name
                break
          else:
            raise errors.ParameterError(field)
          row.append(str(val))

        output.append(row)

    return output
1643 dcb93971 Michael Hanselmann
1644 dcb93971 Michael Hanselmann
1645 a8083063 Iustin Pop
class LUAddNode(LogicalUnit):
  """Logical unit for adding node to the cluster.

  """
  HPATH = "node-add"
  HTYPE = constants.HTYPE_NODE
  _OP_REQP = ["node_name"]

  def BuildHooksEnv(self):
    """Build hooks env.

    This will run on all nodes before, and on all nodes + the new node after.

    Note that self.op.primary_ip/secondary_ip are only guaranteed to be
    set once CheckPrereq has run (they are resolved there).

    """
    env = {
      "OP_TARGET": self.op.node_name,
      "NODE_NAME": self.op.node_name,
      "NODE_PIP": self.op.primary_ip,
      "NODE_SIP": self.op.secondary_ip,
      }
    # pre-hooks run on the current nodes only; post-hooks also on the new one
    nodes_0 = self.cfg.GetNodeList()
    nodes_1 = nodes_0 + [self.op.node_name, ]
    return env, nodes_0, nodes_1

  def CheckPrereq(self):
    """Check prerequisites.

    This checks:
     - the new node is not already in the config
     - it is resolvable
     - its parameters (single/dual homed) matches the cluster

    Any errors are signalled by raising errors.OpPrereqError.

    """
    node_name = self.op.node_name
    cfg = self.cfg

    # resolve the node name; HostInfo raises if resolution fails
    dns_data = utils.HostInfo(node_name)

    node = dns_data.name
    # store the resolved IPs back on the opcode, for BuildHooksEnv
    primary_ip = self.op.primary_ip = dns_data.ip
    secondary_ip = getattr(self.op, "secondary_ip", None)
    if secondary_ip is None:
      # single-homed node: secondary == primary
      secondary_ip = primary_ip
    if not utils.IsValidIP(secondary_ip):
      raise errors.OpPrereqError("Invalid secondary IP given")
    self.op.secondary_ip = secondary_ip

    # NOTE(review): self.op.readd is read directly although it is not in
    # _OP_REQP -- presumably filled in by opcode defaults; confirm upstream
    node_list = cfg.GetNodeList()
    if not self.op.readd and node in node_list:
      raise errors.OpPrereqError("Node %s is already in the configuration" %
                                 node)
    elif self.op.readd and node not in node_list:
      raise errors.OpPrereqError("Node %s is not in the configuration" % node)

    # make sure the IPs do not collide with any configured node
    for existing_node_name in node_list:
      existing_node = cfg.GetNodeInfo(existing_node_name)

      if self.op.readd and node == existing_node_name:
        # on readd the node itself is in the list; its IPs must be unchanged
        if (existing_node.primary_ip != primary_ip or
            existing_node.secondary_ip != secondary_ip):
          raise errors.OpPrereqError("Readded node doesn't have the same IP"
                                     " address configuration as before")
        continue

      if (existing_node.primary_ip == primary_ip or
          existing_node.secondary_ip == primary_ip or
          existing_node.primary_ip == secondary_ip or
          existing_node.secondary_ip == secondary_ip):
        raise errors.OpPrereqError("New node ip address(es) conflict with"
                                   " existing node %s" % existing_node.name)

    # check that the type of the node (single versus dual homed) is the
    # same as for the master
    myself = cfg.GetNodeInfo(self.cfg.GetMasterNode())
    master_singlehomed = myself.secondary_ip == myself.primary_ip
    newbie_singlehomed = secondary_ip == primary_ip
    if master_singlehomed != newbie_singlehomed:
      if master_singlehomed:
        raise errors.OpPrereqError("The master has no private ip but the"
                                   " new node has one")
      else:
        raise errors.OpPrereqError("The master has a private ip but the"
                                   " new node doesn't have one")

    # checks reachablity
    if not utils.TcpPing(primary_ip, constants.DEFAULT_NODED_PORT):
      raise errors.OpPrereqError("Node not reachable by ping")

    if not newbie_singlehomed:
      # check reachability from my secondary ip to newbie's secondary ip
      if not utils.TcpPing(secondary_ip, constants.DEFAULT_NODED_PORT,
                           source=myself.secondary_ip):
        raise errors.OpPrereqError("Node secondary ip not reachable by TCP"
                                   " based ping to noded port")

    # the node object is only added to the configuration in Exec
    self.new_node = objects.Node(name=node,
                                 primary_ip=primary_ip,
                                 secondary_ip=secondary_ip)

  def Exec(self, feedback_fn):
    """Adds the new node to the cluster.

    Verifies connectivity and protocol version, distributes the ssh keys
    and config files, and finally registers the node in the configuration.

    """
    new_node = self.new_node
    node = new_node.name

    # check connectivity
    result = self.rpc.call_version([node])[node]
    if result:
      if constants.PROTOCOL_VERSION == result:
        logger.Info("communication to node %s fine, sw version %s match" %
                    (node, result))
      else:
        raise errors.OpExecError("Version mismatch master version %s,"
                                 " node version %s" %
                                 (constants.PROTOCOL_VERSION, result))
    else:
      raise errors.OpExecError("Cannot get version from the new node")

    # setup ssh on node
    logger.Info("copy ssh key to node %s" % node)
    priv_key, pub_key, _ = ssh.GetUserFiles(constants.GANETI_RUNAS)
    # read the host DSA/RSA key pairs plus the cluster user's key pair
    keyarray = []
    keyfiles = [constants.SSH_HOST_DSA_PRIV, constants.SSH_HOST_DSA_PUB,
                constants.SSH_HOST_RSA_PRIV, constants.SSH_HOST_RSA_PUB,
                priv_key, pub_key]

    for i in keyfiles:
      f = open(i, 'r')
      try:
        keyarray.append(f.read())
      finally:
        f.close()

    result = self.rpc.call_node_add(node, keyarray[0], keyarray[1],
                                    keyarray[2],
                                    keyarray[3], keyarray[4], keyarray[5])

    if not result:
      raise errors.OpExecError("Cannot transfer ssh keys to the new node")

    # Add node to our /etc/hosts, and add key to known_hosts
    utils.AddHostToEtcHosts(new_node.name)

    if new_node.secondary_ip != new_node.primary_ip:
      # dual-homed node: make sure it actually owns the secondary IP
      if not self.rpc.call_node_has_ip_address(new_node.name,
                                               new_node.secondary_ip):
        raise errors.OpExecError("Node claims it doesn't have the secondary ip"
                                 " you gave (%s). Please fix and re-run this"
                                 " command." % new_node.secondary_ip)

    # ask the master node to verify ssh/hostname setup towards the new node
    node_verify_list = [self.cfg.GetMasterNode()]
    node_verify_param = {
      'nodelist': [node],
      # TODO: do a node-net-test as well?
    }

    result = self.rpc.call_node_verify(node_verify_list, node_verify_param,
                                       self.cfg.GetClusterName())
    for verifier in node_verify_list:
      if not result[verifier]:
        raise errors.OpExecError("Cannot communicate with %s's node daemon"
                                 " for remote verification" % verifier)
      if result[verifier]['nodelist']:
        for failed in result[verifier]['nodelist']:
          feedback_fn("ssh/hostname verification failed %s -> %s" %
                      (verifier, result[verifier]['nodelist'][failed]))
        raise errors.OpExecError("ssh/hostname verification failed.")

    # Distribute updated /etc/hosts and known_hosts to all nodes,
    # including the node just added
    myself = self.cfg.GetNodeInfo(self.cfg.GetMasterNode())
    dist_nodes = self.cfg.GetNodeList()
    if not self.op.readd:
      # on readd the node is already in GetNodeList()
      dist_nodes.append(node)
    if myself.name in dist_nodes:
      # the master already has up-to-date copies
      dist_nodes.remove(myself.name)

    logger.Debug("Copying hosts and known_hosts to all nodes")
    for fname in (constants.ETC_HOSTS, constants.SSH_KNOWN_HOSTS_FILE):
      result = self.rpc.call_upload_file(dist_nodes, fname)
      for to_node in dist_nodes:
        # failed copies are only logged, not fatal
        if not result[to_node]:
          logger.Error("copy of file %s to node %s failed" %
                       (fname, to_node))

    # files only needed by specific hypervisors go to the new node only
    to_copy = []
    if constants.HT_XEN_HVM in self.cfg.GetClusterInfo().enabled_hypervisors:
      to_copy.append(constants.VNC_PASSWORD_FILE)
    for fname in to_copy:
      result = self.rpc.call_upload_file([node], fname)
      if not result[node]:
        logger.Error("could not copy file %s to node %s" % (fname, node))

    # finally register the node in the cluster context/configuration
    if self.op.readd:
      self.context.ReaddNode(new_node)
    else:
      self.context.AddNode(new_node)
1845 a8083063 Iustin Pop
1846 a8083063 Iustin Pop
1847 a8083063 Iustin Pop
class LUQueryClusterInfo(NoHooksLU):
  """Query cluster configuration.

  """
  _OP_REQP = []
  REQ_MASTER = False
  REQ_BGL = False

  def ExpandNames(self):
    # read-only query, no locks needed
    self.needed_locks = {}

  def CheckPrereq(self):
    """No prerequisites needed for this LU.

    """
    pass

  def Exec(self, feedback_fn):
    """Return cluster config.

    """
    cfg = self.cfg
    cluster_info = {}
    cluster_info["name"] = cfg.GetClusterName()
    cluster_info["software_version"] = constants.RELEASE_VERSION
    cluster_info["protocol_version"] = constants.PROTOCOL_VERSION
    cluster_info["config_version"] = constants.CONFIG_VERSION
    cluster_info["os_api_version"] = constants.OS_API_VERSION
    cluster_info["export_version"] = constants.EXPORT_VERSION
    cluster_info["master"] = cfg.GetMasterNode()
    cluster_info["architecture"] = (platform.architecture()[0],
                                    platform.machine())
    cluster_info["hypervisor_type"] = cfg.GetHypervisorType()
    cluster_info["enabled_hypervisors"] = \
      cfg.GetClusterInfo().enabled_hypervisors

    return cluster_info
1882 a8083063 Iustin Pop
1883 a8083063 Iustin Pop
1884 ae5849b5 Michael Hanselmann
class LUQueryConfigValues(NoHooksLU):
  """Return configuration values.

  """
  _OP_REQP = []
  REQ_BGL = False

  def ExpandNames(self):
    # read-only query, no locks needed
    self.needed_locks = {}

    static_fields = ["cluster_name", "master_node", "drain_flag"]
    _CheckOutputFields(static=static_fields,
                       dynamic=[],
                       selected=self.op.output_fields)

  def CheckPrereq(self):
    """No prerequisites.

    """
    pass

  def Exec(self, feedback_fn):
    """Dump a representation of the cluster config to the standard output.

    """
    # map each supported field to a zero-argument callable producing it
    getters = {
      "cluster_name": self.cfg.GetClusterName,
      "master_node": self.cfg.GetMasterNode,
      "drain_flag":
        lambda: os.path.exists(constants.JOB_QUEUE_DRAIN_FILE),
      }
    values = []
    for field in self.op.output_fields:
      if field not in getters:
        raise errors.ParameterError(field)
      values.append(getters[field]())
    return values
1921 a8083063 Iustin Pop
1922 a8083063 Iustin Pop
1923 a8083063 Iustin Pop
class LUActivateInstanceDisks(NoHooksLU):
  """Bring up an instance's disks.

  """
  _OP_REQP = ["instance_name"]
  REQ_BGL = False

  def ExpandNames(self):
    self._ExpandAndLockInstance()
    # node locks are filled in later, once the instance's nodes are known
    self.needed_locks[locking.LEVEL_NODE] = []
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE

  def DeclareLocks(self, level):
    if level == locking.LEVEL_NODE:
      self._LockInstancesNodes()

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster.

    """
    self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
    assert self.instance is not None, \
      "Cannot retrieve locked instance %s" % self.op.instance_name

  def Exec(self, feedback_fn):
    """Activate the disks.

    """
    assembled, disk_mappings = _AssembleInstanceDisks(self, self.instance)
    if not assembled:
      raise errors.OpExecError("Cannot activate block devices")

    # the node-to-device mapping, for display to the user
    return disk_mappings
1958 a8083063 Iustin Pop
1959 a8083063 Iustin Pop
1960 b9bddb6b Iustin Pop
def _AssembleInstanceDisks(lu, instance, ignore_secondaries=False):
  """Prepare the block devices for an instance.

  This sets up the block devices on all nodes.

  Args:
    lu: the LogicalUnit on whose behalf we execute (used for cfg and rpc)
    instance: a ganeti.objects.Instance object
    ignore_secondaries: if true, errors on secondary nodes won't result
                        in an error return from the function

  Returns:
    a tuple (disks_ok, device_info), where disks_ok is a boolean that is
    false if the operation failed, and device_info is a list of
    (host, instance_visible_name, rpc_result) tuples with the mapping
    from node devices to instance devices
  """
  device_info = []
  disks_ok = True
  iname = instance.name
  # With the two passes mechanism we try to reduce the window of
  # opportunity for the race condition of switching DRBD to primary
  # before handshaking occured, but we do not eliminate it

  # The proper fix would be to wait (with some limits) until the
  # connection has been made and drbd transitions from WFConnection
  # into any other network-connected state (Connected, SyncTarget,
  # SyncSource, etc.)

  # 1st pass, assemble on all nodes in secondary mode
  for inst_disk in instance.disks:
    for node, node_disk in inst_disk.ComputeNodeTree(instance.primary_node):
      lu.cfg.SetDiskID(node_disk, node)
      result = lu.rpc.call_blockdev_assemble(node, node_disk, iname, False)
      if not result:
        logger.Error("could not prepare block device %s on node %s"
                     " (is_primary=False, pass=1)" % (inst_disk.iv_name, node))
        # NOTE(review): in this pass the check also covers the primary
        # node, so ignore_secondaries ignores its failure too -- confirm
        # whether that is intended
        if not ignore_secondaries:
          disks_ok = False

  # FIXME: race condition on drbd migration to primary

  # 2nd pass, do only the primary node
  for inst_disk in instance.disks:
    for node, node_disk in inst_disk.ComputeNodeTree(instance.primary_node):
      if node != instance.primary_node:
        continue
      lu.cfg.SetDiskID(node_disk, node)
      result = lu.rpc.call_blockdev_assemble(node, node_disk, iname, True)
      if not result:
        logger.Error("could not prepare block device %s on node %s"
                     " (is_primary=True, pass=2)" % (inst_disk.iv_name, node))
        disks_ok = False
    # 'result' here is the value from the last assemble call for this disk
    device_info.append((instance.primary_node, inst_disk.iv_name, result))

  # leave the disks configured for the primary node
  # this is a workaround that would be fixed better by
  # improving the logical/physical id handling
  for disk in instance.disks:
    lu.cfg.SetDiskID(disk, instance.primary_node)

  return disks_ok, device_info
2020 a8083063 Iustin Pop
2021 a8083063 Iustin Pop
2022 b9bddb6b Iustin Pop
def _StartInstanceDisks(lu, instance, force):
  """Start the disks of an instance.

  Assembles the instance's disks; on failure the already-assembled
  devices are shut down again and OpExecError is raised.

  """
  disks_ok, _ = _AssembleInstanceDisks(lu, instance,
                                       ignore_secondaries=force)
  if disks_ok:
    return
  # roll back whatever was assembled before reporting the failure
  _ShutdownInstanceDisks(lu, instance)
  if force is not None and not force:
    logger.Error("If the message above refers to a secondary node,"
                 " you can retry the operation using '--force'.")
  raise errors.OpExecError("Disk consistency error")
2034 fe7b0351 Michael Hanselmann
2035 fe7b0351 Michael Hanselmann
2036 a8083063 Iustin Pop
class LUDeactivateInstanceDisks(NoHooksLU):
  """Shutdown an instance's disks.

  """
  _OP_REQP = ["instance_name"]
  REQ_BGL = False

  def ExpandNames(self):
    self._ExpandAndLockInstance()
    # node locks are filled in later, once the instance's nodes are known
    self.needed_locks[locking.LEVEL_NODE] = []
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE

  def DeclareLocks(self, level):
    if level == locking.LEVEL_NODE:
      self._LockInstancesNodes()

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster.

    """
    self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
    assert self.instance is not None, \
      "Cannot retrieve locked instance %s" % self.op.instance_name

  def Exec(self, feedback_fn):
    """Deactivate the disks

    """
    # refuses to act if the instance is still running
    _SafeShutdownInstanceDisks(self, self.instance)
2068 a8083063 Iustin Pop
2069 a8083063 Iustin Pop
2070 b9bddb6b Iustin Pop
def _SafeShutdownInstanceDisks(lu, instance):
  """Shutdown block devices of an instance.

  This function checks if an instance is running, before calling
  _ShutdownInstanceDisks.

  Args:
    lu: the LogicalUnit on whose behalf we execute (used for the rpc)
    instance: the instance whose disks should be shut down

  Raises errors.OpExecError if the primary node cannot be contacted or
  if the instance is still running.

  """
  ins_l = lu.rpc.call_instance_list([instance.primary_node],
                                      [instance.hypervisor])
  ins_l = ins_l[instance.primary_node]
  # a non-list result means the RPC to the primary node failed
  # (idiom fix: isinstance instead of identity comparison on type())
  if not isinstance(ins_l, list):
    raise errors.OpExecError("Can't contact node '%s'" %
                             instance.primary_node)

  if instance.name in ins_l:
    raise errors.OpExecError("Instance is running, can't shutdown"
                             " block devices.")

  _ShutdownInstanceDisks(lu, instance)
2089 a8083063 Iustin Pop
2090 a8083063 Iustin Pop
2091 b9bddb6b Iustin Pop
def _ShutdownInstanceDisks(lu, instance, ignore_primary=False):
2092 a8083063 Iustin Pop
  """Shutdown block devices of an instance.
2093 a8083063 Iustin Pop

2094 a8083063 Iustin Pop
  This does the shutdown on all nodes of the instance.
2095 a8083063 Iustin Pop

2096 a8083063 Iustin Pop
  If the ignore_primary is false, errors on the primary node are
2097 a8083063 Iustin Pop
  ignored.
2098 a8083063 Iustin Pop

2099 a8083063 Iustin Pop
  """
2100 a8083063 Iustin Pop
  result = True
2101 a8083063 Iustin Pop
  for disk in instance.disks:
2102 a8083063 Iustin Pop
    for node, top_disk in disk.ComputeNodeTree(instance.primary_node):
2103 b9bddb6b Iustin Pop
      lu.cfg.SetDiskID(top_disk, node)
2104 72737a7f Iustin Pop
      if not lu.rpc.call_blockdev_shutdown(node, top_disk):
2105 a8083063 Iustin Pop
        logger.Error("could not shutdown block device %s on node %s" %
2106 a8083063 Iustin Pop
                     (disk.iv_name, node))
2107 a8083063 Iustin Pop
        if not ignore_primary or node != instance.primary_node:
2108 a8083063 Iustin Pop
          result = False
2109 a8083063 Iustin Pop
  return result
2110 a8083063 Iustin Pop
2111 a8083063 Iustin Pop
2112 b9bddb6b Iustin Pop
def _CheckNodeFreeMemory(lu, node, reason, requested, hypervisor):
2113 d4f16fd9 Iustin Pop
  """Checks if a node has enough free memory.
2114 d4f16fd9 Iustin Pop

2115 d4f16fd9 Iustin Pop
  This function check if a given node has the needed amount of free
2116 d4f16fd9 Iustin Pop
  memory. In case the node has less memory or we cannot get the
2117 d4f16fd9 Iustin Pop
  information from the node, this function raise an OpPrereqError
2118 d4f16fd9 Iustin Pop
  exception.
2119 d4f16fd9 Iustin Pop

2120 b9bddb6b Iustin Pop
  @type lu: C{LogicalUnit}
2121 b9bddb6b Iustin Pop
  @param lu: a logical unit from which we get configuration data
2122 e69d05fd Iustin Pop
  @type node: C{str}
2123 e69d05fd Iustin Pop
  @param node: the node to check
2124 e69d05fd Iustin Pop
  @type reason: C{str}
2125 e69d05fd Iustin Pop
  @param reason: string to use in the error message
2126 e69d05fd Iustin Pop
  @type requested: C{int}
2127 e69d05fd Iustin Pop
  @param requested: the amount of memory in MiB to check for
2128 e69d05fd Iustin Pop
  @type hypervisor: C{str}
2129 e69d05fd Iustin Pop
  @param hypervisor: the hypervisor to ask for memory stats
2130 e69d05fd Iustin Pop
  @raise errors.OpPrereqError: if the node doesn't have enough memory, or
2131 e69d05fd Iustin Pop
      we cannot check the node
2132 d4f16fd9 Iustin Pop

2133 d4f16fd9 Iustin Pop
  """
2134 72737a7f Iustin Pop
  nodeinfo = lu.rpc.call_node_info([node], lu.cfg.GetVGName(), hypervisor)
2135 d4f16fd9 Iustin Pop
  if not nodeinfo or not isinstance(nodeinfo, dict):
2136 d4f16fd9 Iustin Pop
    raise errors.OpPrereqError("Could not contact node %s for resource"
2137 d4f16fd9 Iustin Pop
                             " information" % (node,))
2138 d4f16fd9 Iustin Pop
2139 d4f16fd9 Iustin Pop
  free_mem = nodeinfo[node].get('memory_free')
2140 d4f16fd9 Iustin Pop
  if not isinstance(free_mem, int):
2141 d4f16fd9 Iustin Pop
    raise errors.OpPrereqError("Can't compute free memory on node %s, result"
2142 d4f16fd9 Iustin Pop
                             " was '%s'" % (node, free_mem))
2143 d4f16fd9 Iustin Pop
  if requested > free_mem:
2144 d4f16fd9 Iustin Pop
    raise errors.OpPrereqError("Not enough memory on node %s for %s:"
2145 d4f16fd9 Iustin Pop
                             " needed %s MiB, available %s MiB" %
2146 d4f16fd9 Iustin Pop
                             (node, reason, requested, free_mem))
2147 d4f16fd9 Iustin Pop
2148 d4f16fd9 Iustin Pop
2149 a8083063 Iustin Pop
class LUStartupInstance(LogicalUnit):
  """Starts an instance.

  """
  HPATH = "instance-start"
  HTYPE = constants.HTYPE_INSTANCE
  # required opcode attributes
  _OP_REQP = ["instance_name", "force"]
  REQ_BGL = False

  def ExpandNames(self):
    """Expand the instance name and declare the locks we need."""
    self._ExpandAndLockInstance()
    # node locks are recalculated from the instance's node list later
    # (see DeclareLocks)
    self.needed_locks[locking.LEVEL_NODE] = []
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE

  def DeclareLocks(self, level):
    """Acquire the node-level locks for the instance's nodes."""
    if level == locking.LEVEL_NODE:
      self._LockInstancesNodes()

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and secondary nodes of the instance.

    """
    env = {
      "FORCE": self.op.force,
      }
    env.update(_BuildInstanceHookEnvByObject(self, self.instance))
    nl = ([self.cfg.GetMasterNode(), self.instance.primary_node] +
          list(self.instance.secondary_nodes))
    return env, nl, nl

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster.

    """
    self.instance = instance = self.cfg.GetInstanceInfo(self.op.instance_name)
    assert self.instance is not None, \
      "Cannot retrieve locked instance %s" % self.op.instance_name

    # backend parameters with cluster defaults filled in, needed for the
    # memory check below
    bep = self.cfg.GetClusterInfo().FillBE(instance)
    # check bridges existence
    _CheckInstanceBridgesExist(self, instance)

    _CheckNodeFreeMemory(self, instance.primary_node,
                         "starting instance %s" % instance.name,
                         bep[constants.BE_MEMORY], instance.hypervisor)

  def Exec(self, feedback_fn):
    """Start the instance.

    """
    instance = self.instance
    force = self.op.force
    extra_args = getattr(self.op, "extra_args", "")

    # NOTE(review): the instance is marked "up" in the configuration
    # before the start RPC below; if the start fails the config still
    # says up -- presumably intended (e.g. so a watcher retries), confirm
    # before changing this ordering
    self.cfg.MarkInstanceUp(instance.name)

    node_current = instance.primary_node

    _StartInstanceDisks(self, instance, force)

    if not self.rpc.call_instance_start(node_current, instance, extra_args):
      # roll back the disk activation before reporting the failure
      _ShutdownInstanceDisks(self, instance)
      raise errors.OpExecError("Could not start instance")
2216 a8083063 Iustin Pop
2217 a8083063 Iustin Pop
2218 bf6929a2 Alexander Schreiber
class LURebootInstance(LogicalUnit):
  """Reboot an instance.

  """
  HPATH = "instance-reboot"
  HTYPE = constants.HTYPE_INSTANCE
  # required opcode attributes
  _OP_REQP = ["instance_name", "ignore_secondaries", "reboot_type"]
  REQ_BGL = False

  def ExpandNames(self):
    """Validate the reboot type and declare the locks we need."""
    if self.op.reboot_type not in [constants.INSTANCE_REBOOT_SOFT,
                                   constants.INSTANCE_REBOOT_HARD,
                                   constants.INSTANCE_REBOOT_FULL]:
      raise errors.ParameterError("reboot type not in [%s, %s, %s]" %
                                  (constants.INSTANCE_REBOOT_SOFT,
                                   constants.INSTANCE_REBOOT_HARD,
                                   constants.INSTANCE_REBOOT_FULL))
    self._ExpandAndLockInstance()
    self.needed_locks[locking.LEVEL_NODE] = []
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE

  def DeclareLocks(self, level):
    """Acquire the node-level locks for the instance's nodes.

    Only a full reboot touches the instance's disks on the secondary
    nodes (via _ShutdownInstanceDisks/_StartInstanceDisks in Exec), so
    soft/hard reboots only need the primary node lock.

    """
    if level == locking.LEVEL_NODE:
      # Bug fix: this used to be "not constants.INSTANCE_REBOOT_FULL",
      # which negates a non-empty constant and is therefore always
      # False, locking all nodes for every reboot type; the intended
      # condition is on the requested reboot type.
      primary_only = self.op.reboot_type != constants.INSTANCE_REBOOT_FULL
      self._LockInstancesNodes(primary_only=primary_only)

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and secondary nodes of the instance.

    """
    env = {
      "IGNORE_SECONDARIES": self.op.ignore_secondaries,
      }
    env.update(_BuildInstanceHookEnvByObject(self, self.instance))
    nl = ([self.cfg.GetMasterNode(), self.instance.primary_node] +
          list(self.instance.secondary_nodes))
    return env, nl, nl

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster.

    """
    self.instance = instance = self.cfg.GetInstanceInfo(self.op.instance_name)
    assert self.instance is not None, \
      "Cannot retrieve locked instance %s" % self.op.instance_name

    # check bridges existence
    _CheckInstanceBridgesExist(self, instance)

  def Exec(self, feedback_fn):
    """Reboot the instance.

    Soft/hard reboots are delegated to the hypervisor on the primary
    node; a full reboot is implemented as shutdown + disk restart +
    start.

    """
    instance = self.instance
    ignore_secondaries = self.op.ignore_secondaries
    reboot_type = self.op.reboot_type
    extra_args = getattr(self.op, "extra_args", "")

    node_current = instance.primary_node

    if reboot_type in [constants.INSTANCE_REBOOT_SOFT,
                       constants.INSTANCE_REBOOT_HARD]:
      if not self.rpc.call_instance_reboot(node_current, instance,
                                           reboot_type, extra_args):
        raise errors.OpExecError("Could not reboot instance")
    else:
      # full reboot: stop the instance and its disks, then start again
      if not self.rpc.call_instance_shutdown(node_current, instance):
        raise errors.OpExecError("could not shutdown instance for full reboot")
      _ShutdownInstanceDisks(self, instance)
      _StartInstanceDisks(self, instance, ignore_secondaries)
      if not self.rpc.call_instance_start(node_current, instance, extra_args):
        # roll back the disk activation before reporting the failure
        _ShutdownInstanceDisks(self, instance)
        raise errors.OpExecError("Could not start instance for full reboot")

    self.cfg.MarkInstanceUp(instance.name)
2297 bf6929a2 Alexander Schreiber
2298 bf6929a2 Alexander Schreiber
2299 a8083063 Iustin Pop
class LUShutdownInstance(LogicalUnit):
  """Shutdown an instance.

  """
  HPATH = "instance-stop"
  HTYPE = constants.HTYPE_INSTANCE
  # required opcode attributes
  _OP_REQP = ["instance_name"]
  REQ_BGL = False

  def ExpandNames(self):
    """Expand the instance name and declare the locks we need."""
    self._ExpandAndLockInstance()
    # node locks are recalculated from the instance's node list later
    self.needed_locks[locking.LEVEL_NODE] = []
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE

  def DeclareLocks(self, level):
    """Acquire the node-level locks for the instance's nodes."""
    if level == locking.LEVEL_NODE:
      self._LockInstancesNodes()

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and secondary nodes of the instance.

    """
    env = _BuildInstanceHookEnvByObject(self, self.instance)
    nl = [self.cfg.GetMasterNode(), self.instance.primary_node]
    nl.extend(self.instance.secondary_nodes)
    return env, nl, nl

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster.

    """
    self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
    assert self.instance is not None, \
      "Cannot retrieve locked instance %s" % self.op.instance_name

  def Exec(self, feedback_fn):
    """Shutdown the instance.

    """
    inst = self.instance
    # record the new admin state first, then stop the instance and its
    # disks; an RPC failure is only logged, the disks are shut down
    # regardless
    self.cfg.MarkInstanceDown(inst.name)
    if not self.rpc.call_instance_shutdown(inst.primary_node, inst):
      logger.Error("could not shutdown instance")

    _ShutdownInstanceDisks(self, inst)
2349 a8083063 Iustin Pop
2350 a8083063 Iustin Pop
2351 fe7b0351 Michael Hanselmann
class LUReinstallInstance(LogicalUnit):
  """Reinstall an instance.

  """
  HPATH = "instance-reinstall"
  HTYPE = constants.HTYPE_INSTANCE
  # required opcode attributes ("os_type" is optional, see CheckPrereq)
  _OP_REQP = ["instance_name"]
  REQ_BGL = False

  def ExpandNames(self):
    """Expand the instance name and declare the locks we need."""
    self._ExpandAndLockInstance()
    # node locks are recalculated from the instance's node list later
    self.needed_locks[locking.LEVEL_NODE] = []
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE

  def DeclareLocks(self, level):
    """Acquire the node-level locks for the instance's nodes."""
    if level == locking.LEVEL_NODE:
      self._LockInstancesNodes()

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and secondary nodes of the instance.

    """
    env = _BuildInstanceHookEnvByObject(self, self.instance)
    nl = ([self.cfg.GetMasterNode(), self.instance.primary_node] +
          list(self.instance.secondary_nodes))
    return env, nl, nl

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster and is not running,
    and (if an OS change was requested) that the new OS is valid on the
    primary node.

    """
    instance = self.cfg.GetInstanceInfo(self.op.instance_name)
    assert instance is not None, \
      "Cannot retrieve locked instance %s" % self.op.instance_name

    if instance.disk_template == constants.DT_DISKLESS:
      raise errors.OpPrereqError("Instance '%s' has no disks" %
                                 self.op.instance_name)
    if instance.status != "down":
      raise errors.OpPrereqError("Instance '%s' is marked to be up" %
                                 self.op.instance_name)
    remote_info = self.rpc.call_instance_info(instance.primary_node,
                                              instance.name,
                                              instance.hypervisor)
    if remote_info:
      raise errors.OpPrereqError("Instance '%s' is running on the node %s" %
                                 (self.op.instance_name,
                                  instance.primary_node))

    self.op.os_type = getattr(self.op, "os_type", None)
    if self.op.os_type is not None:
      # OS verification
      pnode = self.cfg.GetNodeInfo(
        self.cfg.ExpandNodeName(instance.primary_node))
      if pnode is None:
        # Bug fix: this message used to interpolate self.op.pnode, an
        # attribute this opcode does not declare, turning the intended
        # OpPrereqError into an AttributeError.
        raise errors.OpPrereqError("Primary node '%s' is unknown" %
                                   instance.primary_node)
      os_obj = self.rpc.call_os_get(pnode.name, self.op.os_type)
      if not os_obj:
        raise errors.OpPrereqError("OS '%s' not in supported OS list for"
                                   " primary node" % self.op.os_type)

    self.instance = instance

  def Exec(self, feedback_fn):
    """Reinstall the instance.

    """
    inst = self.instance

    if self.op.os_type is not None:
      feedback_fn("Changing OS to '%s'..." % self.op.os_type)
      inst.os = self.op.os_type
      self.cfg.Update(inst)

    _StartInstanceDisks(self, inst, None)
    try:
      feedback_fn("Running the instance OS create scripts...")
      if not self.rpc.call_instance_os_add(inst.primary_node, inst,
                                           "sda", "sdb"):
        raise errors.OpExecError("Could not install OS for instance %s"
                                 " on node %s" %
                                 (inst.name, inst.primary_node))
    finally:
      # always deactivate the disks again, even if the install failed
      _ShutdownInstanceDisks(self, inst)
2440 fe7b0351 Michael Hanselmann
2441 fe7b0351 Michael Hanselmann
2442 decd5f45 Iustin Pop
class LURenameInstance(LogicalUnit):
  """Rename an instance.

  """
  HPATH = "instance-rename"
  HTYPE = constants.HTYPE_INSTANCE
  # required opcode attributes ("ignore_ip" is optional, see CheckPrereq)
  _OP_REQP = ["instance_name", "new_name"]

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and secondary nodes of the instance.

    """
    env = _BuildInstanceHookEnvByObject(self, self.instance)
    env["INSTANCE_NEW_NAME"] = self.op.new_name
    nl = ([self.cfg.GetMasterNode(), self.instance.primary_node] +
          list(self.instance.secondary_nodes))
    return env, nl, nl

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster and is not running,
    and that the new name resolves and is not already taken (by another
    instance or, unless ignore_ip is set, by a reachable IP).

    """
    instance = self.cfg.GetInstanceInfo(
      self.cfg.ExpandInstanceName(self.op.instance_name))
    if instance is None:
      raise errors.OpPrereqError("Instance '%s' not known" %
                                 self.op.instance_name)
    if instance.status != "down":
      raise errors.OpPrereqError("Instance '%s' is marked to be up" %
                                 self.op.instance_name)
    remote_info = self.rpc.call_instance_info(instance.primary_node,
                                              instance.name,
                                              instance.hypervisor)
    if remote_info:
      raise errors.OpPrereqError("Instance '%s' is running on the node %s" %
                                 (self.op.instance_name,
                                  instance.primary_node))
    self.instance = instance

    # new name verification
    name_info = utils.HostInfo(self.op.new_name)

    # note: the resolved (canonical) name replaces the user-supplied one
    self.op.new_name = new_name = name_info.name
    instance_list = self.cfg.GetInstanceList()
    if new_name in instance_list:
      raise errors.OpPrereqError("Instance '%s' is already in the cluster" %
                                 new_name)

    if not getattr(self.op, "ignore_ip", False):
      if utils.TcpPing(name_info.ip, constants.DEFAULT_NODED_PORT):
        raise errors.OpPrereqError("IP %s of instance %s already in use" %
                                   (name_info.ip, new_name))


  def Exec(self, feedback_fn):
    """Rename the instance.

    """
    inst = self.instance
    old_name = inst.name

    # for file-based instances the storage directory is derived from the
    # first disk's path and must be renamed on the primary node below
    if inst.disk_template == constants.DT_FILE:
      old_file_storage_dir = os.path.dirname(inst.disks[0].logical_id[1])

    self.cfg.RenameInstance(inst.name, self.op.new_name)
    # Change the instance lock. This is definitely safe while we hold the BGL
    self.context.glm.remove(locking.LEVEL_INSTANCE, inst.name)
    self.context.glm.add(locking.LEVEL_INSTANCE, self.op.new_name)

    # re-read the instance from the configuration after rename
    inst = self.cfg.GetInstanceInfo(self.op.new_name)

    if inst.disk_template == constants.DT_FILE:
      new_file_storage_dir = os.path.dirname(inst.disks[0].logical_id[1])
      result = self.rpc.call_file_storage_dir_rename(inst.primary_node,
                                                     old_file_storage_dir,
                                                     new_file_storage_dir)

      # a false result means the node could not be reached at all...
      if not result:
        raise errors.OpExecError("Could not connect to node '%s' to rename"
                                 " directory '%s' to '%s' (but the instance"
                                 " has been renamed in Ganeti)" % (
                                 inst.primary_node, old_file_storage_dir,
                                 new_file_storage_dir))

      # ...while result[0] being false means the rename itself failed
      if not result[0]:
        raise errors.OpExecError("Could not rename directory '%s' to '%s'"
                                 " (but the instance has been renamed in"
                                 " Ganeti)" % (old_file_storage_dir,
                                               new_file_storage_dir))

    _StartInstanceDisks(self, inst, None)
    try:
      # an OS rename-script failure is only logged; the config rename
      # already happened and is not rolled back
      if not self.rpc.call_instance_run_rename(inst.primary_node, inst,
                                               old_name,
                                               "sda", "sdb"):
        msg = ("Could not run OS rename script for instance %s on node %s"
               " (but the instance has been renamed in Ganeti)" %
               (inst.name, inst.primary_node))
        logger.Error(msg)
    finally:
      _ShutdownInstanceDisks(self, inst)
2548 decd5f45 Iustin Pop
2549 decd5f45 Iustin Pop
2550 a8083063 Iustin Pop
class LURemoveInstance(LogicalUnit):
  """Remove an instance.

  """
  HPATH = "instance-remove"
  HTYPE = constants.HTYPE_INSTANCE
  # required opcode attributes
  _OP_REQP = ["instance_name", "ignore_failures"]
  REQ_BGL = False

  def ExpandNames(self):
    """Expand the instance name and declare the locks we need."""
    self._ExpandAndLockInstance()
    # node locks are recalculated from the instance's node list later
    self.needed_locks[locking.LEVEL_NODE] = []
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE

  def DeclareLocks(self, level):
    """Acquire the node-level locks for the instance's nodes."""
    if level == locking.LEVEL_NODE:
      self._LockInstancesNodes()

  def BuildHooksEnv(self):
    """Build hooks env.

    The hooks for this LU run on the master node only.

    """
    env = _BuildInstanceHookEnvByObject(self, self.instance)
    nl = [self.cfg.GetMasterNode()]
    return env, nl, nl

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster.

    """
    self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
    assert self.instance is not None, \
      "Cannot retrieve locked instance %s" % self.op.instance_name

  def Exec(self, feedback_fn):
    """Remove the instance.

    """
    inst = self.instance
    logger.Info("shutting down instance %s on node %s" %
                (inst.name, inst.primary_node))

    if not self.rpc.call_instance_shutdown(inst.primary_node, inst):
      if not self.op.ignore_failures:
        raise errors.OpExecError("Could not shutdown instance %s on node %s" %
                                 (inst.name, inst.primary_node))
      feedback_fn("Warning: can't shutdown instance")

    logger.Info("removing block devices for instance %s" % inst.name)

    if not _RemoveDisks(self, inst):
      if not self.op.ignore_failures:
        raise errors.OpExecError("Can't remove instance's disks")
      feedback_fn("Warning: can't remove instance's disks")

    logger.Info("removing instance %s out of cluster config" % inst.name)

    self.cfg.RemoveInstance(inst.name)
    # schedule the release/removal of the (now stale) instance lock
    self.remove_locks[locking.LEVEL_INSTANCE] = inst.name
2615 a8083063 Iustin Pop
2616 a8083063 Iustin Pop
2617 a8083063 Iustin Pop
class LUQueryInstances(NoHooksLU):
  """Logical unit for querying instances.

  """
  _OP_REQP = ["output_fields", "names"]
  REQ_BGL = False

  def ExpandNames(self):
    """Compute the requested fields and the locks needed to serve them.

    """
    # fields that can only be computed via RPC calls to the nodes
    self.dynamic_fields = frozenset(["oper_state", "oper_ram", "status"])
    hvp = ["hv/%s" % name for name in constants.HVS_PARAMETERS]
    bep = ["be/%s" % name for name in constants.BES_PARAMETERS]
    # fields that can be answered from the configuration alone
    self.static_fields = frozenset([
      "name", "os", "pnode", "snodes",
      "admin_state", "admin_ram",
      "disk_template", "ip", "mac", "bridge",
      "sda_size", "sdb_size", "vcpus", "tags",
      "network_port",
      "serial_no", "hypervisor", "hvparams",
      ] + hvp + bep)

    _CheckOutputFields(static=self.static_fields,
                       dynamic=self.dynamic_fields,
                       selected=self.op.output_fields)

    self.needed_locks = {}
    # this is a read-only query, so any locks we take can be shared
    self.share_locks[locking.LEVEL_INSTANCE] = 1
    self.share_locks[locking.LEVEL_NODE] = 1

    if self.op.names:
      self.wanted = _GetWantedInstances(self, self.op.names)
    else:
      self.wanted = locking.ALL_SET

    # locking is only needed if a non-static field was requested
    self.do_locking = not self.static_fields.issuperset(self.op.output_fields)
    if self.do_locking:
      self.needed_locks[locking.LEVEL_INSTANCE] = self.wanted
      self.needed_locks[locking.LEVEL_NODE] = []
      self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE

  def DeclareLocks(self, level):
    if level == locking.LEVEL_NODE and self.do_locking:
      self._LockInstancesNodes()

  def CheckPrereq(self):
    """Check prerequisites.

    """
    pass

  def Exec(self, feedback_fn):
    """Computes the list of nodes and their attributes.

    """
    all_info = self.cfg.GetAllInstancesInfo()
    if self.do_locking:
      # we hold instance locks, so query exactly the locked instances
      instance_names = self.acquired_locks[locking.LEVEL_INSTANCE]
    elif self.wanted != locking.ALL_SET:
      # no locks held: verify the explicitly-named instances still exist
      instance_names = self.wanted
      missing = set(instance_names).difference(all_info.keys())
      if missing:
        raise errors.OpExecError(
          "Some instances were removed before retrieving their data: %s"
          % missing)
    else:
      instance_names = all_info.keys()

    instance_names = utils.NiceSort(instance_names)
    instance_list = [all_info[iname] for iname in instance_names]

    # begin data gathering

    nodes = frozenset([inst.primary_node for inst in instance_list])
    hv_list = list(set([inst.hypervisor for inst in instance_list]))

    # nodes which failed to answer the live-data RPC
    bad_nodes = []
    # only issue the (expensive) RPC if a dynamic field was requested
    if self.dynamic_fields.intersection(self.op.output_fields):
      live_data = {}
      node_data = self.rpc.call_all_instances_info(nodes, hv_list)
      for name in nodes:
        result = node_data[name]
        if result:
          live_data.update(result)
        elif result == False:
          bad_nodes.append(name)
        # else no instance is alive
    else:
      live_data = dict([(name, {}) for name in instance_names])

    # end data gathering

    HVPREFIX = "hv/"
    BEPREFIX = "be/"
    output = []
    for instance in instance_list:
      iout = []
      # per-instance hypervisor and backend parameters
      i_hv = self.cfg.GetClusterInfo().FillHV(instance)
      i_be = self.cfg.GetClusterInfo().FillBE(instance)
      for field in self.op.output_fields:
        if field == "name":
          val = instance.name
        elif field == "os":
          val = instance.os
        elif field == "pnode":
          val = instance.primary_node
        elif field == "snodes":
          val = list(instance.secondary_nodes)
        elif field == "admin_state":
          val = (instance.status != "down")
        elif field == "oper_state":
          # actual run state from the live data; None if the node is bad
          if instance.primary_node in bad_nodes:
            val = None
          else:
            val = bool(live_data.get(instance.name))
        elif field == "status":
          # combined admin/operational status
          if instance.primary_node in bad_nodes:
            val = "ERROR_nodedown"
          else:
            running = bool(live_data.get(instance.name))
            if running:
              if instance.status != "down":
                val = "running"
              else:
                val = "ERROR_up"
            else:
              if instance.status != "down":
                val = "ERROR_down"
              else:
                val = "ADMIN_down"
        elif field == "oper_ram":
          if instance.primary_node in bad_nodes:
            val = None
          elif instance.name in live_data:
            val = live_data[instance.name].get("memory", "?")
          else:
            val = "-"
        elif field == "disk_template":
          val = instance.disk_template
        elif field == "ip":
          val = instance.nics[0].ip
        elif field == "bridge":
          val = instance.nics[0].bridge
        elif field == "mac":
          val = instance.nics[0].mac
        elif field == "sda_size" or field == "sdb_size":
          disk = instance.FindDisk(field[:3])
          if disk is None:
            val = None
          else:
            val = disk.size
        elif field == "tags":
          val = list(instance.GetTags())
        elif field == "serial_no":
          val = instance.serial_no
        elif field == "network_port":
          val = instance.network_port
        elif field == "hypervisor":
          val = instance.hypervisor
        elif field == "hvparams":
          val = i_hv
        elif (field.startswith(HVPREFIX) and
              field[len(HVPREFIX):] in constants.HVS_PARAMETERS):
          val = i_hv.get(field[len(HVPREFIX):], None)
        elif field == "beparams":
          val = i_be
        elif (field.startswith(BEPREFIX) and
              field[len(BEPREFIX):] in constants.BES_PARAMETERS):
          val = i_be.get(field[len(BEPREFIX):], None)
        else:
          raise errors.ParameterError(field)
        iout.append(val)
      output.append(iout)

    return output
2790 a8083063 Iustin Pop
2791 a8083063 Iustin Pop
2792 a8083063 Iustin Pop
class LUFailoverInstance(LogicalUnit):
  """Failover an instance.

  """
  HPATH = "instance-failover"
  HTYPE = constants.HTYPE_INSTANCE
  _OP_REQP = ["instance_name", "ignore_consistency"]
  REQ_BGL = False

  def ExpandNames(self):
    """Lock the instance; its node locks are computed later.

    """
    self._ExpandAndLockInstance()
    self.needed_locks[locking.LEVEL_NODE] = []
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE

  def DeclareLocks(self, level):
    if level == locking.LEVEL_NODE:
      self._LockInstancesNodes()

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and secondary nodes of the instance.

    """
    env = {
      "IGNORE_CONSISTENCY": self.op.ignore_consistency,
      }
    env.update(_BuildInstanceHookEnvByObject(self, self.instance))
    nl = [self.cfg.GetMasterNode()] + list(self.instance.secondary_nodes)
    return env, nl, nl

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster.

    """
    self.instance = instance = self.cfg.GetInstanceInfo(self.op.instance_name)
    assert self.instance is not None, \
      "Cannot retrieve locked instance %s" % self.op.instance_name

    bep = self.cfg.GetClusterInfo().FillBE(instance)
    # failover only makes sense for network-mirrored disk templates
    if instance.disk_template not in constants.DTS_NET_MIRROR:
      raise errors.OpPrereqError("Instance's disk layout is not"
                                 " network mirrored, cannot failover.")

    secondary_nodes = instance.secondary_nodes
    if not secondary_nodes:
      raise errors.ProgrammerError("no secondary node but using "
                                   "a mirrored disk template")

    target_node = secondary_nodes[0]
    # check memory requirements on the secondary node
    _CheckNodeFreeMemory(self, target_node, "failing over instance %s" %
                         instance.name, bep[constants.BE_MEMORY],
                         instance.hypervisor)

    # check bridge existence
    brlist = [nic.bridge for nic in instance.nics]
    if not self.rpc.call_bridges_exist(target_node, brlist):
      raise errors.OpPrereqError("One or more target bridges %s does not"
                                 " exist on destination node '%s'" %
                                 (brlist, target_node))

  def Exec(self, feedback_fn):
    """Failover an instance.

    The failover is done by shutting it down on its present node and
    starting it on the secondary.

    """
    instance = self.instance

    source_node = instance.primary_node
    target_node = instance.secondary_nodes[0]

    feedback_fn("* checking disk consistency between source and target")
    for dev in instance.disks:
      # for drbd, these are drbd over lvm
      if not _CheckDiskConsistency(self, dev, target_node, False):
        # degraded disks only abort the failover for running instances,
        # and only if the user did not ask to ignore consistency
        if instance.status == "up" and not self.op.ignore_consistency:
          raise errors.OpExecError("Disk %s is degraded on target node,"
                                   " aborting failover." % dev.iv_name)

    feedback_fn("* shutting down instance on source node")
    logger.Info("Shutting down instance %s on node %s" %
                (instance.name, source_node))

    if not self.rpc.call_instance_shutdown(source_node, instance):
      if self.op.ignore_consistency:
        logger.Error("Could not shutdown instance %s on node %s. Proceeding"
                     " anyway. Please make sure node %s is down"  %
                     (instance.name, source_node, source_node))
      else:
        raise errors.OpExecError("Could not shutdown instance %s on node %s" %
                                 (instance.name, source_node))

    feedback_fn("* deactivating the instance's disks on source node")
    if not _ShutdownInstanceDisks(self, instance, ignore_primary=True):
      raise errors.OpExecError("Can't shut down the instance's disks.")

    instance.primary_node = target_node
    # distribute new instance config to the other nodes
    self.cfg.Update(instance)

    # Only start the instance if it's marked as up
    if instance.status == "up":
      feedback_fn("* activating the instance's disks on target node")
      logger.Info("Starting instance %s on node %s" %
                  (instance.name, target_node))

      disks_ok, dummy = _AssembleInstanceDisks(self, instance,
                                               ignore_secondaries=True)
      if not disks_ok:
        _ShutdownInstanceDisks(self, instance)
        raise errors.OpExecError("Can't activate the instance's disks")

      feedback_fn("* starting the instance on the target node")
      if not self.rpc.call_instance_start(target_node, instance, None):
        _ShutdownInstanceDisks(self, instance)
        raise errors.OpExecError("Could not start instance %s on node %s." %
                                 (instance.name, target_node))
2914 a8083063 Iustin Pop
2915 a8083063 Iustin Pop
2916 b9bddb6b Iustin Pop
def _CreateBlockDevOnPrimary(lu, node, instance, device, info):
2917 a8083063 Iustin Pop
  """Create a tree of block devices on the primary node.
2918 a8083063 Iustin Pop

2919 a8083063 Iustin Pop
  This always creates all devices.
2920 a8083063 Iustin Pop

2921 a8083063 Iustin Pop
  """
2922 a8083063 Iustin Pop
  if device.children:
2923 a8083063 Iustin Pop
    for child in device.children:
2924 b9bddb6b Iustin Pop
      if not _CreateBlockDevOnPrimary(lu, node, instance, child, info):
2925 a8083063 Iustin Pop
        return False
2926 a8083063 Iustin Pop
2927 b9bddb6b Iustin Pop
  lu.cfg.SetDiskID(device, node)
2928 72737a7f Iustin Pop
  new_id = lu.rpc.call_blockdev_create(node, device, device.size,
2929 72737a7f Iustin Pop
                                       instance.name, True, info)
2930 a8083063 Iustin Pop
  if not new_id:
2931 a8083063 Iustin Pop
    return False
2932 a8083063 Iustin Pop
  if device.physical_id is None:
2933 a8083063 Iustin Pop
    device.physical_id = new_id
2934 a8083063 Iustin Pop
  return True
2935 a8083063 Iustin Pop
2936 a8083063 Iustin Pop
2937 b9bddb6b Iustin Pop
def _CreateBlockDevOnSecondary(lu, node, instance, device, force, info):
2938 a8083063 Iustin Pop
  """Create a tree of block devices on a secondary node.
2939 a8083063 Iustin Pop

2940 a8083063 Iustin Pop
  If this device type has to be created on secondaries, create it and
2941 a8083063 Iustin Pop
  all its children.
2942 a8083063 Iustin Pop

2943 a8083063 Iustin Pop
  If not, just recurse to children keeping the same 'force' value.
2944 a8083063 Iustin Pop

2945 a8083063 Iustin Pop
  """
2946 a8083063 Iustin Pop
  if device.CreateOnSecondary():
2947 a8083063 Iustin Pop
    force = True
2948 a8083063 Iustin Pop
  if device.children:
2949 a8083063 Iustin Pop
    for child in device.children:
2950 b9bddb6b Iustin Pop
      if not _CreateBlockDevOnSecondary(lu, node, instance,
2951 3f78eef2 Iustin Pop
                                        child, force, info):
2952 a8083063 Iustin Pop
        return False
2953 a8083063 Iustin Pop
2954 a8083063 Iustin Pop
  if not force:
2955 a8083063 Iustin Pop
    return True
2956 b9bddb6b Iustin Pop
  lu.cfg.SetDiskID(device, node)
2957 72737a7f Iustin Pop
  new_id = lu.rpc.call_blockdev_create(node, device, device.size,
2958 72737a7f Iustin Pop
                                       instance.name, False, info)
2959 a8083063 Iustin Pop
  if not new_id:
2960 a8083063 Iustin Pop
    return False
2961 a8083063 Iustin Pop
  if device.physical_id is None:
2962 a8083063 Iustin Pop
    device.physical_id = new_id
2963 a8083063 Iustin Pop
  return True
2964 a8083063 Iustin Pop
2965 a8083063 Iustin Pop
2966 b9bddb6b Iustin Pop
def _GenerateUniqueNames(lu, exts):
2967 923b1523 Iustin Pop
  """Generate a suitable LV name.
2968 923b1523 Iustin Pop

2969 923b1523 Iustin Pop
  This will generate a logical volume name for the given instance.
2970 923b1523 Iustin Pop

2971 923b1523 Iustin Pop
  """
2972 923b1523 Iustin Pop
  results = []
2973 923b1523 Iustin Pop
  for val in exts:
2974 b9bddb6b Iustin Pop
    new_id = lu.cfg.GenerateUniqueID()
2975 923b1523 Iustin Pop
    results.append("%s%s" % (new_id, val))
2976 923b1523 Iustin Pop
  return results
2977 923b1523 Iustin Pop
2978 923b1523 Iustin Pop
2979 b9bddb6b Iustin Pop
def _GenerateDRBD8Branch(lu, primary, secondary, size, names, iv_name,
                         p_minor, s_minor):
  """Generate a drbd8 device complete with its children.

  The result is a DRBD8 disk whose children are the data and metadata
  logical volumes (names[0] and names[1] respectively).

  """
  port = lu.cfg.AllocatePort()
  vgname = lu.cfg.GetVGName()
  shared_secret = lu.cfg.GenerateDRBDSecret()

  data_child = objects.Disk(dev_type=constants.LD_LV, size=size,
                            logical_id=(vgname, names[0]))
  # the metadata volume has a fixed size of 128MB
  meta_child = objects.Disk(dev_type=constants.LD_LV, size=128,
                            logical_id=(vgname, names[1]))
  drbd_id = (primary, secondary, port, p_minor, s_minor, shared_secret)
  return objects.Disk(dev_type=constants.LD_DRBD8, size=size,
                      logical_id=drbd_id,
                      children=[data_child, meta_child],
                      iv_name=iv_name)
2998 a1f445d3 Iustin Pop
2999 7c0d6283 Michael Hanselmann
3000 b9bddb6b Iustin Pop
def _GenerateDiskTemplate(lu, template_name,
                          instance_name, primary_node,
                          secondary_nodes, disk_sz, swap_sz,
                          file_storage_dir, file_driver):
  """Generate the entire disk layout for a given template type.

  Returns the list of (top-level) disk objects for the two-drive
  (sda/sdb) layout, or raises ProgrammerError when the template and
  the number of secondary nodes do not match.

  """
  #TODO: compute space requirements

  vgname = lu.cfg.GetVGName()

  if template_name == constants.DT_DISKLESS:
    return []

  if template_name == constants.DT_PLAIN:
    if secondary_nodes:
      raise errors.ProgrammerError("Wrong template configuration")

    lv_names = _GenerateUniqueNames(lu, [".sda", ".sdb"])
    sda = objects.Disk(dev_type=constants.LD_LV, size=disk_sz,
                       logical_id=(vgname, lv_names[0]),
                       iv_name="sda")
    sdb = objects.Disk(dev_type=constants.LD_LV, size=swap_sz,
                       logical_id=(vgname, lv_names[1]),
                       iv_name="sdb")
    return [sda, sdb]

  if template_name == constants.DT_DRBD8:
    if len(secondary_nodes) != 1:
      raise errors.ProgrammerError("Wrong template configuration")
    remote_node = secondary_nodes[0]
    # four minors: sda/sdb on the primary, sda/sdb on the secondary
    (minor_pa, minor_pb, minor_sa, minor_sb) = lu.cfg.AllocateDRBDMinor(
      [primary_node, primary_node, remote_node, remote_node], instance_name)

    lv_names = _GenerateUniqueNames(lu, [".sda_data", ".sda_meta",
                                         ".sdb_data", ".sdb_meta"])
    sda = _GenerateDRBD8Branch(lu, primary_node, remote_node,
                               disk_sz, lv_names[0:2], "sda",
                               minor_pa, minor_sa)
    sdb = _GenerateDRBD8Branch(lu, primary_node, remote_node,
                               swap_sz, lv_names[2:4], "sdb",
                               minor_pb, minor_sb)
    return [sda, sdb]

  if template_name == constants.DT_FILE:
    if secondary_nodes:
      raise errors.ProgrammerError("Wrong template configuration")

    sda = objects.Disk(dev_type=constants.LD_FILE, size=disk_sz,
                       iv_name="sda",
                       logical_id=(file_driver,
                                   "%s/sda" % file_storage_dir))
    sdb = objects.Disk(dev_type=constants.LD_FILE, size=swap_sz,
                       iv_name="sdb",
                       logical_id=(file_driver,
                                   "%s/sdb" % file_storage_dir))
    return [sda, sdb]

  raise errors.ProgrammerError("Invalid disk template '%s'" % template_name)
3055 a8083063 Iustin Pop
3056 a8083063 Iustin Pop
3057 a0c3fea1 Michael Hanselmann
def _GetInstanceInfoText(instance):
3058 3ecf6786 Iustin Pop
  """Compute that text that should be added to the disk's metadata.
3059 3ecf6786 Iustin Pop

3060 3ecf6786 Iustin Pop
  """
3061 a0c3fea1 Michael Hanselmann
  return "originstname+%s" % instance.name
3062 a0c3fea1 Michael Hanselmann
3063 a0c3fea1 Michael Hanselmann
3064 b9bddb6b Iustin Pop
def _CreateDisks(lu, instance):
  """Create all disks for an instance.

  This abstracts away some work from AddInstance.

  Args:
    instance: the instance object

  Returns:
    True or False showing the success of the creation process

  """
  info = _GetInstanceInfoText(instance)

  # file-based disks need their backing directory created first
  if instance.disk_template == constants.DT_FILE:
    file_storage_dir = os.path.dirname(instance.disks[0].logical_id[1])
    result = lu.rpc.call_file_storage_dir_create(instance.primary_node,
                                                 file_storage_dir)
    if not result:
      logger.Error("Could not connect to node '%s'" % instance.primary_node)
      return False
    if not result[0]:
      logger.Error("failed to create directory '%s'" % file_storage_dir)
      return False

  for disk in instance.disks:
    logger.Info("creating volume %s for instance %s" %
                (disk.iv_name, instance.name))
    #HARDCODE
    for snode in instance.secondary_nodes:
      if not _CreateBlockDevOnSecondary(lu, snode, instance,
                                        disk, False, info):
        logger.Error("failed to create volume %s (%s) on secondary node %s!" %
                     (disk.iv_name, disk, snode))
        return False
    #HARDCODE
    if not _CreateBlockDevOnPrimary(lu, instance.primary_node,
                                    instance, disk, info):
      logger.Error("failed to create volume %s on primary!" %
                   disk.iv_name)
      return False

  return True
3109 a8083063 Iustin Pop
3110 a8083063 Iustin Pop
3111 b9bddb6b Iustin Pop
def _RemoveDisks(lu, instance):
  """Remove all disks for an instance.

  This abstracts away some work from `AddInstance()` and
  `RemoveInstance()`. Note that in case some of the devices couldn't
  be removed, the removal will continue with the other ones (compare
  with `_CreateDisks()`).

  Args:
    instance: the instance object

  Returns:
    True or False showing the success of the removal process

  """
  logger.Info("removing block devices for instance %s" % instance.name)

  all_ok = True
  for top_disk in instance.disks:
    # remove every device of the tree, on the node it lives on
    for node, disk in top_disk.ComputeNodeTree(instance.primary_node):
      lu.cfg.SetDiskID(disk, node)
      if not lu.rpc.call_blockdev_remove(node, disk):
        logger.Error("could not remove block device %s on node %s,"
                     " continuing anyway" %
                     (top_disk.iv_name, node))
        all_ok = False

  if instance.disk_template == constants.DT_FILE:
    # also drop the backing directory of file-based disks
    file_storage_dir = os.path.dirname(instance.disks[0].logical_id[1])
    if not lu.rpc.call_file_storage_dir_remove(instance.primary_node,
                                               file_storage_dir):
      logger.Error("could not remove directory '%s'" % file_storage_dir)
      all_ok = False

  return all_ok
3146 a8083063 Iustin Pop
3147 a8083063 Iustin Pop
3148 e2fe6369 Iustin Pop
def _ComputeDiskSize(disk_template, disk_size, swap_size):
  """Compute the volume group space an instance will need.

  The computation is hard-coded for the classic two-drive layout
  (data disk plus swap disk).  Disk templates which do not consume
  volume group space (diskless and file-based) yield None.

  """
  base_size = disk_size + swap_size
  # required free space per template; None means "no LVM space needed"
  # (the DRBD8 value includes 256 MB for drbd metadata, 128MB per device)
  requirements = {
    constants.DT_DISKLESS: None,
    constants.DT_FILE: None,
    constants.DT_PLAIN: base_size,
    constants.DT_DRBD8: base_size + 256,
  }
  try:
    return requirements[disk_template]
  except KeyError:
    raise errors.ProgrammerError("Disk template '%s' size requirement"
                                 " is unknown" % disk_template)
3168 e2fe6369 Iustin Pop
3169 e2fe6369 Iustin Pop
3170 74409b12 Iustin Pop
def _CheckHVParams(lu, nodenames, hvname, hvparams):
3171 74409b12 Iustin Pop
  """Hypervisor parameter validation.
3172 74409b12 Iustin Pop

3173 74409b12 Iustin Pop
  This function abstract the hypervisor parameter validation to be
3174 74409b12 Iustin Pop
  used in both instance create and instance modify.
3175 74409b12 Iustin Pop

3176 74409b12 Iustin Pop
  @type lu: L{LogicalUnit}
3177 74409b12 Iustin Pop
  @param lu: the logical unit for which we check
3178 74409b12 Iustin Pop
  @type nodenames: list
3179 74409b12 Iustin Pop
  @param nodenames: the list of nodes on which we should check
3180 74409b12 Iustin Pop
  @type hvname: string
3181 74409b12 Iustin Pop
  @param hvname: the name of the hypervisor we should use
3182 74409b12 Iustin Pop
  @type hvparams: dict
3183 74409b12 Iustin Pop
  @param hvparams: the parameters which we need to check
3184 74409b12 Iustin Pop
  @raise errors.OpPrereqError: if the parameters are not valid
3185 74409b12 Iustin Pop

3186 74409b12 Iustin Pop
  """
3187 74409b12 Iustin Pop
  hvinfo = lu.rpc.call_hypervisor_validate_params(nodenames,
3188 74409b12 Iustin Pop
                                                  hvname,
3189 74409b12 Iustin Pop
                                                  hvparams)
3190 74409b12 Iustin Pop
  for node in nodenames:
3191 74409b12 Iustin Pop
    info = hvinfo.get(node, None)
3192 74409b12 Iustin Pop
    if not info or not isinstance(info, (tuple, list)):
3193 74409b12 Iustin Pop
      raise errors.OpPrereqError("Cannot get current information"
3194 74409b12 Iustin Pop
                                 " from node '%s' (%s)" % (node, info))
3195 74409b12 Iustin Pop
    if not info[0]:
3196 74409b12 Iustin Pop
      raise errors.OpPrereqError("Hypervisor parameter validation failed:"
3197 74409b12 Iustin Pop
                                 " %s" % info[1])
3198 74409b12 Iustin Pop
3199 74409b12 Iustin Pop
3200 a8083063 Iustin Pop
class LUCreateInstance(LogicalUnit):
  """Create an instance.

  Covers the whole lifetime of an instance-add opcode: lock
  computation and cheap syntax checks (ExpandNames), optional
  iallocator-driven node selection, prerequisite checks (free disk and
  memory, OS and bridge existence, export validity for imports), and
  the actual creation in Exec (disk creation, config registration, OS
  install or import, optional start).

  """
  HPATH = "instance-add"
  HTYPE = constants.HTYPE_INSTANCE
  # required opcode attributes; optional ones (pnode, snode,
  # iallocator, hypervisor, ip, bridge, src_node, src_path, os_type,
  # file_driver, file_storage_dir) are defaulted via getattr/setattr
  # in ExpandNames and CheckPrereq
  _OP_REQP = ["instance_name", "disk_size",
              "disk_template", "swap_size", "mode", "start",
              "wait_for_sync", "ip_check", "mac",
              "hvparams", "beparams"]
  REQ_BGL = False

  def _ExpandNode(self, node):
    """Expands and checks one node name.

    @type node: string
    @param node: the (possibly short) node name to expand
    @rtype: string
    @return: the fully-expanded node name
    @raise errors.OpPrereqError: if the node name cannot be expanded

    """
    node_full = self.cfg.ExpandNodeName(node)
    if node_full is None:
      raise errors.OpPrereqError("Unknown node %s" % node)
    return node_full

  def ExpandNames(self):
    """ExpandNames for CreateInstance.

    Figure out the right locks for instance creation: the new instance
    name itself, plus either all nodes (when an iallocator will pick
    the nodes) or the explicitly given primary/secondary/source nodes.
    Also performs all checks that need no cluster state (mode, disk
    template, hypervisor parameter syntax, instance name, IP and MAC
    syntax, file storage options).

    """
    self.needed_locks = {}

    # set optional parameters to none if they don't exist
    for attr in ["pnode", "snode", "iallocator", "hypervisor"]:
      if not hasattr(self.op, attr):
        setattr(self.op, attr, None)

    # cheap checks, mostly valid constants given

    # verify creation mode
    if self.op.mode not in (constants.INSTANCE_CREATE,
                            constants.INSTANCE_IMPORT):
      raise errors.OpPrereqError("Invalid instance creation mode '%s'" %
                                 self.op.mode)

    # disk template and mirror node verification
    if self.op.disk_template not in constants.DISK_TEMPLATES:
      raise errors.OpPrereqError("Invalid disk template name")

    if self.op.hypervisor is None:
      self.op.hypervisor = self.cfg.GetHypervisorType()

    cluster = self.cfg.GetClusterInfo()
    enabled_hvs = cluster.enabled_hypervisors
    if self.op.hypervisor not in enabled_hvs:
      raise errors.OpPrereqError("Selected hypervisor (%s) not enabled in the"
                                 " cluster (%s)" % (self.op.hypervisor,
                                  ",".join(enabled_hvs)))

    # check hypervisor parameter syntax (locally)

    filled_hvp = cluster.FillDict(cluster.hvparams[self.op.hypervisor],
                                  self.op.hvparams)
    hv_type = hypervisor.GetHypervisor(self.op.hypervisor)
    hv_type.CheckParameterSyntax(filled_hvp)

    # fill and remember the beparams dict (cluster defaults overridden
    # by the opcode's backend parameters)
    self.be_full = cluster.FillDict(cluster.beparams[constants.BEGR_DEFAULT],
                                    self.op.beparams)

    #### instance parameters check

    # instance name verification
    hostname1 = utils.HostInfo(self.op.instance_name)
    self.op.instance_name = instance_name = hostname1.name

    # this is just a preventive check, but someone might still add this
    # instance in the meantime, and creation will fail at lock-add time
    if instance_name in self.cfg.GetInstanceList():
      raise errors.OpPrereqError("Instance '%s' is already in the cluster" %
                                 instance_name)

    self.add_locks[locking.LEVEL_INSTANCE] = instance_name

    # ip validity checks: "none" disables the IP, "auto" takes the
    # resolved address of the instance name
    ip = getattr(self.op, "ip", None)
    if ip is None or ip.lower() == "none":
      inst_ip = None
    elif ip.lower() == "auto":
      inst_ip = hostname1.ip
    else:
      if not utils.IsValidIP(ip):
        raise errors.OpPrereqError("given IP address '%s' doesn't look"
                                   " like a valid IP" % ip)
      inst_ip = ip
    self.inst_ip = self.op.ip = inst_ip
    # used in CheckPrereq for ip ping check
    self.check_ip = hostname1.ip

    # MAC address verification ("auto" is resolved later, in Exec)
    if self.op.mac != "auto":
      if not utils.IsValidMac(self.op.mac.lower()):
        raise errors.OpPrereqError("invalid MAC address specified: %s" %
                                   self.op.mac)

    # file storage checks
    if (self.op.file_driver and
        not self.op.file_driver in constants.FILE_DRIVER):
      raise errors.OpPrereqError("Invalid file driver name '%s'" %
                                 self.op.file_driver)

    if self.op.file_storage_dir and os.path.isabs(self.op.file_storage_dir):
      raise errors.OpPrereqError("File storage directory path not absolute")

    ### Node/iallocator related checks
    if [self.op.iallocator, self.op.pnode].count(None) != 1:
      raise errors.OpPrereqError("One and only one of iallocator and primary"
                                 " node must be given")

    if self.op.iallocator:
      # the allocator may pick any node, so lock them all
      self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
    else:
      self.op.pnode = self._ExpandNode(self.op.pnode)
      nodelist = [self.op.pnode]
      if self.op.snode is not None:
        self.op.snode = self._ExpandNode(self.op.snode)
        nodelist.append(self.op.snode)
      self.needed_locks[locking.LEVEL_NODE] = nodelist

    # in case of import lock the source node too
    if self.op.mode == constants.INSTANCE_IMPORT:
      src_node = getattr(self.op, "src_node", None)
      src_path = getattr(self.op, "src_path", None)

      if src_node is None or src_path is None:
        raise errors.OpPrereqError("Importing an instance requires source"
                                   " node and path options")

      if not os.path.isabs(src_path):
        raise errors.OpPrereqError("The source path must be absolute")

      self.op.src_node = src_node = self._ExpandNode(src_node)
      if self.needed_locks[locking.LEVEL_NODE] is not locking.ALL_SET:
        self.needed_locks[locking.LEVEL_NODE].append(src_node)

    else: # INSTANCE_CREATE
      if getattr(self.op, "os_type", None) is None:
        raise errors.OpPrereqError("No guest OS specified")

  def _RunAllocator(self):
    """Run the allocator based on input opcode.

    Builds the allocation request from the opcode's disk/nic data,
    runs the configured iallocator and stores the chosen nodes back
    into self.op.pnode (and self.op.snode for two-node templates).

    @raise errors.OpPrereqError: if the allocator fails or returns an
        unexpected number of nodes

    """
    disks = [{"size": self.op.disk_size, "mode": "w"},
             {"size": self.op.swap_size, "mode": "w"}]
    nics = [{"mac": self.op.mac, "ip": getattr(self.op, "ip", None),
             "bridge": self.op.bridge}]
    ial = IAllocator(self,
                     mode=constants.IALLOCATOR_MODE_ALLOC,
                     name=self.op.instance_name,
                     disk_template=self.op.disk_template,
                     tags=[],
                     os=self.op.os_type,
                     vcpus=self.be_full[constants.BE_VCPUS],
                     mem_size=self.be_full[constants.BE_MEMORY],
                     disks=disks,
                     nics=nics,
                     )

    ial.Run(self.op.iallocator)

    if not ial.success:
      raise errors.OpPrereqError("Can't compute nodes using"
                                 " iallocator '%s': %s" % (self.op.iallocator,
                                                           ial.info))
    if len(ial.nodes) != ial.required_nodes:
      raise errors.OpPrereqError("iallocator '%s' returned invalid number"
                                 " of nodes (%s), required %s" %
                                 (self.op.iallocator, len(ial.nodes),
                                  ial.required_nodes))
    self.op.pnode = ial.nodes[0]
    logger.ToStdout("Selected nodes for the instance: %s" %
                    (", ".join(ial.nodes),))
    logger.Info("Selected nodes for instance %s via iallocator %s: %s" %
                (self.op.instance_name, self.op.iallocator, ial.nodes))
    if ial.required_nodes == 2:
      self.op.snode = ial.nodes[1]

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and secondary nodes of the instance.

    @return: a (env, nl, nl) tuple: the hook environment dict and the
        node list (master plus instance nodes), used for both pre- and
        post-hooks

    """
    env = {
      "INSTANCE_DISK_TEMPLATE": self.op.disk_template,
      "INSTANCE_DISK_SIZE": self.op.disk_size,
      "INSTANCE_SWAP_SIZE": self.op.swap_size,
      "INSTANCE_ADD_MODE": self.op.mode,
      }
    if self.op.mode == constants.INSTANCE_IMPORT:
      env["INSTANCE_SRC_NODE"] = self.op.src_node
      env["INSTANCE_SRC_PATH"] = self.op.src_path
      env["INSTANCE_SRC_IMAGE"] = self.src_image

    env.update(_BuildInstanceHookEnv(name=self.op.instance_name,
      primary_node=self.op.pnode,
      secondary_nodes=self.secondaries,
      status=self.instance_status,
      os_type=self.op.os_type,
      memory=self.be_full[constants.BE_MEMORY],
      vcpus=self.be_full[constants.BE_VCPUS],
      nics=[(self.inst_ip, self.op.bridge, self.op.mac)],
    ))

    nl = ([self.cfg.GetMasterNode(), self.op.pnode] +
          self.secondaries)
    return env, nl, nl


  def CheckPrereq(self):
    """Check prerequisites.

    Verifies, against live cluster state, everything ExpandNames could
    not: export validity (for imports), IP availability, node disk and
    memory capacity, hypervisor parameters, and OS/bridge existence on
    the primary node.  Also runs the iallocator, if one was requested.

    """
    if (not self.cfg.GetVGName() and
        self.op.disk_template not in constants.DTS_NOT_LVM):
      raise errors.OpPrereqError("Cluster does not support lvm-based"
                                 " instances")


    if self.op.mode == constants.INSTANCE_IMPORT:
      src_node = self.op.src_node
      src_path = self.op.src_path

      export_info = self.rpc.call_export_info(src_node, src_path)

      if not export_info:
        raise errors.OpPrereqError("No export found in dir %s" % src_path)

      if not export_info.has_section(constants.INISECT_EXP):
        raise errors.ProgrammerError("Corrupted export config")

      ei_version = export_info.get(constants.INISECT_EXP, 'version')
      if (int(ei_version) != constants.EXPORT_VERSION):
        raise errors.OpPrereqError("Wrong export version %s (wanted %d)" %
                                   (ei_version, constants.EXPORT_VERSION))

      if int(export_info.get(constants.INISECT_INS, 'disk_count')) > 1:
        raise errors.OpPrereqError("Can't import instance with more than"
                                   " one data disk")

      # FIXME: are the old os-es, disk sizes, etc. useful?
      self.op.os_type = export_info.get(constants.INISECT_EXP, 'os')
      diskimage = os.path.join(src_path, export_info.get(constants.INISECT_INS,
                                                         'disk0_dump'))
      self.src_image = diskimage

    # ip ping checks (we use the same ip that was resolved in ExpandNames)

    if self.op.start and not self.op.ip_check:
      raise errors.OpPrereqError("Cannot ignore IP address conflicts when"
                                 " adding an instance in start mode")

    if self.op.ip_check:
      if utils.TcpPing(self.check_ip, constants.DEFAULT_NODED_PORT):
        raise errors.OpPrereqError("IP %s of instance %s already in use" %
                                   (self.check_ip, self.op.instance_name))

    # bridge verification
    bridge = getattr(self.op, "bridge", None)
    if bridge is None:
      self.op.bridge = self.cfg.GetDefBridge()
    else:
      self.op.bridge = bridge

    #### allocator run

    if self.op.iallocator is not None:
      self._RunAllocator()

    #### node related checks

    # check primary node
    self.pnode = pnode = self.cfg.GetNodeInfo(self.op.pnode)
    assert self.pnode is not None, \
      "Cannot retrieve locked node %s" % self.op.pnode
    self.secondaries = []

    # mirror node verification
    if self.op.disk_template in constants.DTS_NET_MIRROR:
      if self.op.snode is None:
        raise errors.OpPrereqError("The networked disk templates need"
                                   " a mirror node")
      if self.op.snode == pnode.name:
        raise errors.OpPrereqError("The secondary node cannot be"
                                   " the primary node.")
      self.secondaries.append(self.op.snode)

    nodenames = [pnode.name] + self.secondaries

    req_size = _ComputeDiskSize(self.op.disk_template,
                                self.op.disk_size, self.op.swap_size)

    # Check lv size requirements (req_size is None for templates that
    # don't use the volume group)
    if req_size is not None:
      nodeinfo = self.rpc.call_node_info(nodenames, self.cfg.GetVGName(),
                                         self.op.hypervisor)
      for node in nodenames:
        info = nodeinfo.get(node, None)
        if not info:
          raise errors.OpPrereqError("Cannot get current information"
                                     " from node '%s'" % node)
        vg_free = info.get('vg_free', None)
        if not isinstance(vg_free, int):
          raise errors.OpPrereqError("Can't compute free disk space on"
                                     " node %s" % node)
        if req_size > info['vg_free']:
          raise errors.OpPrereqError("Not enough disk space on target node %s."
                                     " %d MB available, %d MB required" %
                                     (node, info['vg_free'], req_size))

    _CheckHVParams(self, nodenames, self.op.hypervisor, self.op.hvparams)

    # os verification
    os_obj = self.rpc.call_os_get(pnode.name, self.op.os_type)
    if not os_obj:
      raise errors.OpPrereqError("OS '%s' not in supported os list for"
                                 " primary node"  % self.op.os_type)

    # bridge check on primary node
    if not self.rpc.call_bridges_exist(self.pnode.name, [self.op.bridge]):
      raise errors.OpPrereqError("target bridge '%s' does not exist on"
                                 " destination node '%s'" %
                                 (self.op.bridge, pnode.name))

    # memory check on primary node
    if self.op.start:
      _CheckNodeFreeMemory(self, self.pnode.name,
                           "creating instance %s" % self.op.instance_name,
                           self.be_full[constants.BE_MEMORY],
                           self.op.hypervisor)

    if self.op.start:
      self.instance_status = 'up'
    else:
      self.instance_status = 'down'

  def Exec(self, feedback_fn):
    """Create and add the instance to the cluster.

    Creates the disks, registers the instance in the configuration,
    waits for disk sync, installs or imports the OS, and optionally
    starts the instance.  Disk creation and sync failures roll back
    what was done so far.

    @param feedback_fn: callable used to report progress to the caller

    """
    instance = self.op.instance_name
    pnode_name = self.pnode.name

    if self.op.mac == "auto":
      mac_address = self.cfg.GenerateMAC()
    else:
      mac_address = self.op.mac

    nic = objects.NIC(bridge=self.op.bridge, mac=mac_address)
    if self.inst_ip is not None:
      nic.ip = self.inst_ip

    ht_kind = self.op.hypervisor
    if ht_kind in constants.HTS_REQ_PORT:
      network_port = self.cfg.AllocatePort()
    else:
      network_port = None

    ##if self.op.vnc_bind_address is None:
    ##  self.op.vnc_bind_address = constants.VNC_DEFAULT_BIND_ADDRESS

    # this is needed because os.path.join does not accept None arguments
    if self.op.file_storage_dir is None:
      string_file_storage_dir = ""
    else:
      string_file_storage_dir = self.op.file_storage_dir

    # build the full file storage dir path
    file_storage_dir = os.path.normpath(os.path.join(
                                        self.cfg.GetFileStorageDir(),
                                        string_file_storage_dir, instance))


    disks = _GenerateDiskTemplate(self,
                                  self.op.disk_template,
                                  instance, pnode_name,
                                  self.secondaries, self.op.disk_size,
                                  self.op.swap_size,
                                  file_storage_dir,
                                  self.op.file_driver)

    iobj = objects.Instance(name=instance, os=self.op.os_type,
                            primary_node=pnode_name,
                            nics=[nic], disks=disks,
                            disk_template=self.op.disk_template,
                            status=self.instance_status,
                            network_port=network_port,
                            beparams=self.op.beparams,
                            hvparams=self.op.hvparams,
                            hypervisor=self.op.hypervisor,
                            )

    feedback_fn("* creating instance disks...")
    if not _CreateDisks(self, iobj):
      # roll back any devices that were created before the failure
      _RemoveDisks(self, iobj)
      self.cfg.ReleaseDRBDMinors(instance)
      raise errors.OpExecError("Device creation failed, reverting...")

    feedback_fn("adding instance %s to cluster config" % instance)

    self.cfg.AddInstance(iobj)
    # Declare that we don't want to remove the instance lock anymore, as we've
    # added the instance to the config
    del self.remove_locks[locking.LEVEL_INSTANCE]
    # Remove the temp. assignements for the instance's drbds
    self.cfg.ReleaseDRBDMinors(instance)

    if self.op.wait_for_sync:
      disk_abort = not _WaitForSync(self, iobj)
    elif iobj.disk_template in constants.DTS_NET_MIRROR:
      # make sure the disks are not degraded (still sync-ing is ok)
      time.sleep(15)
      feedback_fn("* checking mirrors status")
      disk_abort = not _WaitForSync(self, iobj, oneshot=True)
    else:
      disk_abort = False

    if disk_abort:
      # degraded disks: undo the disk creation and the config entry
      _RemoveDisks(self, iobj)
      self.cfg.RemoveInstance(iobj.name)
      # Make sure the instance lock gets removed
      self.remove_locks[locking.LEVEL_INSTANCE] = iobj.name
      raise errors.OpExecError("There are some degraded disks for"
                               " this instance")

    feedback_fn("creating os for instance %s on node %s" %
                (instance, pnode_name))

    if iobj.disk_template != constants.DT_DISKLESS:
      if self.op.mode == constants.INSTANCE_CREATE:
        feedback_fn("* running the instance OS create scripts...")
        if not self.rpc.call_instance_os_add(pnode_name, iobj, "sda", "sdb"):
          raise errors.OpExecError("could not add os for instance %s"
                                   " on node %s" %
                                   (instance, pnode_name))

      elif self.op.mode == constants.INSTANCE_IMPORT:
        feedback_fn("* running the instance OS import scripts...")
        src_node = self.op.src_node
        src_image = self.src_image
        cluster_name = self.cfg.GetClusterName()
        if not self.rpc.call_instance_os_import(pnode_name, iobj, "sda", "sdb",
                                                src_node, src_image,
                                                cluster_name):
          raise errors.OpExecError("Could not import os for instance"
                                   " %s on node %s" %
                                   (instance, pnode_name))
      else:
        # also checked in the prereq part
        raise errors.ProgrammerError("Unknown OS initialization mode '%s'"
                                     % self.op.mode)

    if self.op.start:
      logger.Info("starting instance %s on node %s" % (instance, pnode_name))
      feedback_fn("* starting instance...")
      if not self.rpc.call_instance_start(pnode_name, iobj, None):
        raise errors.OpExecError("Could not start instance")
3665 a8083063 Iustin Pop
3666 a8083063 Iustin Pop
3667 a8083063 Iustin Pop
class LUConnectConsole(NoHooksLU):
  """Compute the command needed to attach to an instance's console.

  This LU is somewhat special: it performs no remote action itself but
  instead returns the command line that must be run on the master node
  in order to connect to the instance's console.

  """
  _OP_REQP = ["instance_name"]
  REQ_BGL = False

  def ExpandNames(self):
    self._ExpandAndLockInstance()

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster.

    """
    self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
    assert self.instance is not None, \
      "Cannot retrieve locked instance %s" % self.op.instance_name

  def Exec(self, feedback_fn):
    """Return the ssh command line opening the instance's console.

    """
    inst = self.instance
    pnode = inst.primary_node

    # ask the primary node which instances it currently runs
    running = self.rpc.call_instance_list([pnode],
                                          [inst.hypervisor])[pnode]
    if running is False:
      raise errors.OpExecError("Can't connect to node %s." % pnode)

    if inst.name not in running:
      raise errors.OpExecError("Instance %s is not running." % inst.name)

    logger.Debug("connecting to console of %s on %s" % (inst.name, pnode))

    # the hypervisor knows its own console invocation
    hyper = hypervisor.GetHypervisor(inst.hypervisor)
    console_cmd = hyper.GetShellCommandForConsole(inst)

    # build ssh cmdline
    return self.ssh.BuildCmd(pnode, "root", console_cmd, batch=True, tty=True)
3713 a8083063 Iustin Pop
3714 a8083063 Iustin Pop
3715 a8083063 Iustin Pop
class LUReplaceDisks(LogicalUnit):
3716 a8083063 Iustin Pop
  """Replace the disks of an instance.
3717 a8083063 Iustin Pop

3718 a8083063 Iustin Pop
  """
3719 a8083063 Iustin Pop
  HPATH = "mirrors-replace"
3720 a8083063 Iustin Pop
  HTYPE = constants.HTYPE_INSTANCE
3721 a9e0c397 Iustin Pop
  _OP_REQP = ["instance_name", "mode", "disks"]
3722 efd990e4 Guido Trotter
  REQ_BGL = False
3723 efd990e4 Guido Trotter
3724 efd990e4 Guido Trotter
  def ExpandNames(self):
    """Compute the locks needed for the disk replacement."""
    self._ExpandAndLockInstance()

    # normalize an absent remote_node attribute to None
    if not hasattr(self.op, "remote_node"):
      self.op.remote_node = None

    use_iallocator = getattr(self.op, "iallocator", None) is not None

    if use_iallocator:
      if self.op.remote_node is not None:
        raise errors.OpPrereqError("Give either the iallocator or the new"
                                   " secondary, not both")
      # the allocator may pick any node, so we must lock them all
      self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
    elif self.op.remote_node is not None:
      expanded = self.cfg.ExpandNodeName(self.op.remote_node)
      if expanded is None:
        raise errors.OpPrereqError("Node '%s' not known" %
                                   self.op.remote_node)
      # store the canonical name and lock it in addition to the
      # instance's own nodes
      self.op.remote_node = expanded
      self.needed_locks[locking.LEVEL_NODE] = [expanded]
      self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_APPEND
    else:
      # no explicit secondary: lock just the instance's nodes, filled
      # in later by DeclareLocks
      self.needed_locks[locking.LEVEL_NODE] = []
      self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
3747 efd990e4 Guido Trotter
3748 efd990e4 Guido Trotter
  def DeclareLocks(self, level):
    """Declare the instance's node locks at the node level.

    If we are already locking the whole node set (iallocator case)
    there is nothing further to declare.

    """
    if level != locking.LEVEL_NODE:
      return
    if self.needed_locks[locking.LEVEL_NODE] is locking.ALL_SET:
      return
    self._LockInstancesNodes()
3754 a8083063 Iustin Pop
3755 b6e82a65 Iustin Pop
  def _RunAllocator(self):
    """Compute a new secondary node using an IAllocator.

    Runs the configured iallocator in relocation mode for this
    instance and stores the chosen node in self.op.remote_node.
    Raises OpPrereqError if the allocator fails or returns an
    unexpected number of nodes.

    """
    ial = IAllocator(self,
                     mode=constants.IALLOCATOR_MODE_RELOC,
                     name=self.op.instance_name,
                     relocate_from=[self.sec_node])

    ial.Run(self.op.iallocator)

    if not ial.success:
      raise errors.OpPrereqError("Can't compute nodes using"
                                 " iallocator '%s': %s" % (self.op.iallocator,
                                                           ial.info))
    if len(ial.nodes) != ial.required_nodes:
      # bugfix: the format string has three placeholders but the
      # allocator name was missing from the argument tuple, which made
      # this branch raise TypeError instead of the intended error
      raise errors.OpPrereqError("iallocator '%s' returned invalid number"
                                 " of nodes (%s), required %s" %
                                 (self.op.iallocator, len(ial.nodes),
                                  ial.required_nodes))
    self.op.remote_node = ial.nodes[0]
    logger.ToStdout("Selected new secondary for the instance: %s" %
                    self.op.remote_node)
3777 b6e82a65 Iustin Pop
3778 a8083063 Iustin Pop
  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on the master, the primary and all the secondaries.

    """
    instance = self.instance
    env = {
      "MODE": self.op.mode,
      "NEW_SECONDARY": self.op.remote_node,
      "OLD_SECONDARY": instance.secondary_nodes[0],
      }
    env.update(_BuildInstanceHookEnvByObject(self, instance))
    # hooks always run on the master and the primary node; the new
    # secondary is added only when one was explicitly given
    nl = [self.cfg.GetMasterNode(), instance.primary_node]
    if self.op.remote_node is not None:
      nl = nl + [self.op.remote_node]
    return env, nl, nl
3797 a8083063 Iustin Pop
3798 a8083063 Iustin Pop
  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster.

    Also validates the disk template and the replacement mode, runs the
    iallocator if one was requested, and computes the target/other node
    attributes (self.tgt_node, self.oth_node, self.new_node) used by the
    Exec phase.

    """
    instance = self.cfg.GetInstanceInfo(self.op.instance_name)
    assert instance is not None, \
      "Cannot retrieve locked instance %s" % self.op.instance_name
    self.instance = instance

    # disk replacement only makes sense for network-mirrored templates
    if instance.disk_template not in constants.DTS_NET_MIRROR:
      raise errors.OpPrereqError("Instance's disk layout is not"
                                 " network mirrored.")

    if len(instance.secondary_nodes) != 1:
      raise errors.OpPrereqError("The instance has a strange layout,"
                                 " expected one secondary but found %d" %
                                 len(instance.secondary_nodes))

    self.sec_node = instance.secondary_nodes[0]

    # when an iallocator is given, it picks the new secondary and
    # stores it in self.op.remote_node
    ia_name = getattr(self.op, "iallocator", None)
    if ia_name is not None:
      self._RunAllocator()

    remote_node = self.op.remote_node
    if remote_node is not None:
      self.remote_node_info = self.cfg.GetNodeInfo(remote_node)
      assert self.remote_node_info is not None, \
        "Cannot retrieve locked node %s" % remote_node
    else:
      self.remote_node_info = None
    if remote_node == instance.primary_node:
      raise errors.OpPrereqError("The specified node is the primary node of"
                                 " the instance.")
    elif remote_node == self.sec_node:
      if self.op.mode == constants.REPLACE_DISK_SEC:
        # this is for DRBD8, where we can't execute the same mode of
        # replacement as for drbd7 (no different port allocated)
        raise errors.OpPrereqError("Same secondary given, cannot execute"
                                   " replacement")
    if instance.disk_template == constants.DT_DRBD8:
      if (self.op.mode == constants.REPLACE_DISK_ALL and
          remote_node is not None):
        # switch to replace secondary mode
        self.op.mode = constants.REPLACE_DISK_SEC

      if self.op.mode == constants.REPLACE_DISK_ALL:
        raise errors.OpPrereqError("Template 'drbd' only allows primary or"
                                   " secondary disk replacement, not"
                                   " both at once")
      elif self.op.mode == constants.REPLACE_DISK_PRI:
        if remote_node is not None:
          raise errors.OpPrereqError("Template 'drbd' does not allow changing"
                                     " the secondary while doing a primary"
                                     " node disk replacement")
        # primary replacement: work on the primary, peer is the secondary
        self.tgt_node = instance.primary_node
        self.oth_node = instance.secondary_nodes[0]
      elif self.op.mode == constants.REPLACE_DISK_SEC:
        self.new_node = remote_node # this can be None, in which case
                                    # we don't change the secondary
        # secondary replacement: work on the secondary, peer is the primary
        self.tgt_node = instance.secondary_nodes[0]
        self.oth_node = instance.primary_node
      else:
        raise errors.ProgrammerError("Unhandled disk replace mode")

    # all requested disks must actually exist on the instance
    for name in self.op.disks:
      if instance.FindDisk(name) is None:
        raise errors.OpPrereqError("Disk '%s' not found for instance '%s'" %
                                   (name, instance.name))
3869 a8083063 Iustin Pop
3870 a9e0c397 Iustin Pop
  def _ExecD8DiskOnly(self, feedback_fn):
    """Replace a disk on the primary or secondary for drbd8.

    The algorithm for replace is quite complicated:
      - for each disk to be replaced:
        - create new LVs on the target node with unique names
        - detach old LVs from the drbd device
        - rename old LVs to name_replaced.<time_t>
        - rename new LVs to old LVs
        - attach the new LVs (with the old names now) to the drbd device
      - wait for sync across all devices
      - for each modified disk:
        - remove old LVs (which have the name name_replaced.<time_t>)

    Failures are not very well handled.

    """
    steps_total = 6
    # shorthand for the processor's logging helpers
    warning, info = (self.proc.LogWarning, self.proc.LogInfo)
    instance = self.instance
    # maps disk iv_name -> (drbd device, old LV children, new LV children)
    iv_names = {}
    vgname = self.cfg.GetVGName()
    # start of work
    cfg = self.cfg
    # tgt_node/oth_node were computed in CheckPrereq
    tgt_node = self.tgt_node
    oth_node = self.oth_node

    # Step: check device activation
    self.proc.LogStep(1, steps_total, "check device existence")
    info("checking volume groups")
    my_vg = cfg.GetVGName()
    results = self.rpc.call_vg_list([oth_node, tgt_node])
    if not results:
      raise errors.OpExecError("Can't list volume groups on the nodes")
    for node in oth_node, tgt_node:
      res = results.get(node, False)
      if not res or my_vg not in res:
        raise errors.OpExecError("Volume group '%s' not found on %s" %
                                 (my_vg, node))
    for dev in instance.disks:
      if not dev.iv_name in self.op.disks:
        continue
      for node in tgt_node, oth_node:
        info("checking %s on %s" % (dev.iv_name, node))
        cfg.SetDiskID(dev, node)
        if not self.rpc.call_blockdev_find(node, dev):
          raise errors.OpExecError("Can't find device %s on node %s" %
                                   (dev.iv_name, node))

    # Step: check other node consistency
    self.proc.LogStep(2, steps_total, "check peer consistency")
    for dev in instance.disks:
      if not dev.iv_name in self.op.disks:
        continue
      info("checking %s consistency on %s" % (dev.iv_name, oth_node))
      if not _CheckDiskConsistency(self, dev, oth_node,
                                   oth_node==instance.primary_node):
        raise errors.OpExecError("Peer node (%s) has degraded storage, unsafe"
                                 " to replace disks on this node (%s)" %
                                 (oth_node, tgt_node))

    # Step: create new storage
    self.proc.LogStep(3, steps_total, "allocate new storage")
    for dev in instance.disks:
      if not dev.iv_name in self.op.disks:
        continue
      size = dev.size
      cfg.SetDiskID(dev, tgt_node)
      lv_names = [".%s_%s" % (dev.iv_name, suf) for suf in ["data", "meta"]]
      names = _GenerateUniqueNames(self, lv_names)
      lv_data = objects.Disk(dev_type=constants.LD_LV, size=size,
                             logical_id=(vgname, names[0]))
      # NOTE(review): the meta LV size is fixed at 128 (presumably MiB,
      # matching DRBD8 metadata requirements) — confirm units
      lv_meta = objects.Disk(dev_type=constants.LD_LV, size=128,
                             logical_id=(vgname, names[1]))
      new_lvs = [lv_data, lv_meta]
      old_lvs = dev.children
      iv_names[dev.iv_name] = (dev, old_lvs, new_lvs)
      info("creating new local storage on %s for %s" %
           (tgt_node, dev.iv_name))
      # since we *always* want to create this LV, we use the
      # _Create...OnPrimary (which forces the creation), even if we
      # are talking about the secondary node
      for new_lv in new_lvs:
        if not _CreateBlockDevOnPrimary(self, tgt_node, instance, new_lv,
                                        _GetInstanceInfoText(instance)):
          raise errors.OpExecError("Failed to create new LV named '%s' on"
                                   " node '%s'" %
                                   (new_lv.logical_id[1], tgt_node))

    # Step: for each lv, detach+rename*2+attach
    self.proc.LogStep(4, steps_total, "change drbd configuration")
    for dev, old_lvs, new_lvs in iv_names.itervalues():
      info("detaching %s drbd from local storage" % dev.iv_name)
      if not self.rpc.call_blockdev_removechildren(tgt_node, dev, old_lvs):
        raise errors.OpExecError("Can't detach drbd from local storage on node"
                                 " %s for device %s" % (tgt_node, dev.iv_name))
      #dev.children = []
      #cfg.Update(instance)

      # ok, we created the new LVs, so now we know we have the needed
      # storage; as such, we proceed on the target node to rename
      # old_lv to _old, and new_lv to old_lv; note that we rename LVs
      # using the assumption that logical_id == physical_id (which in
      # turn is the unique_id on that node)

      # FIXME(iustin): use a better name for the replaced LVs
      temp_suffix = int(time.time())
      ren_fn = lambda d, suff: (d.physical_id[0],
                                d.physical_id[1] + "_replaced-%s" % suff)
      # build the rename list based on what LVs exist on the node
      rlist = []
      for to_ren in old_lvs:
        find_res = self.rpc.call_blockdev_find(tgt_node, to_ren)
        if find_res is not None: # device exists
          rlist.append((to_ren, ren_fn(to_ren, temp_suffix)))

      info("renaming the old LVs on the target node")
      if not self.rpc.call_blockdev_rename(tgt_node, rlist):
        raise errors.OpExecError("Can't rename old LVs on node %s" % tgt_node)
      # now we rename the new LVs to the old LVs
      info("renaming the new LVs on the target node")
      rlist = [(new, old.physical_id) for old, new in zip(old_lvs, new_lvs)]
      if not self.rpc.call_blockdev_rename(tgt_node, rlist):
        raise errors.OpExecError("Can't rename new LVs on node %s" % tgt_node)

      # update the in-memory config: new LVs take over the old ids,
      # old LVs get the temporary "_replaced" ids
      for old, new in zip(old_lvs, new_lvs):
        new.logical_id = old.logical_id
        cfg.SetDiskID(new, tgt_node)

      for disk in old_lvs:
        disk.logical_id = ren_fn(disk, temp_suffix)
        cfg.SetDiskID(disk, tgt_node)

      # now that the new lvs have the old name, we can add them to the device
      info("adding new mirror component on %s" % tgt_node)
      if not self.rpc.call_blockdev_addchildren(tgt_node, dev, new_lvs):
        # attach failed: best-effort removal of the freshly created LVs
        for new_lv in new_lvs:
          if not self.rpc.call_blockdev_remove(tgt_node, new_lv):
            warning("Can't rollback device %s", hint="manually cleanup unused"
                    " logical volumes")
        raise errors.OpExecError("Can't add local storage to drbd")

      dev.children = new_lvs
      cfg.Update(instance)

    # Step: wait for sync

    # this can fail as the old devices are degraded and _WaitForSync
    # does a combined result over all disks, so we don't check its
    # return value
    self.proc.LogStep(5, steps_total, "sync devices")
    _WaitForSync(self, instance, unlock=True)

    # so check manually all the devices
    for name, (dev, old_lvs, new_lvs) in iv_names.iteritems():
      cfg.SetDiskID(dev, instance.primary_node)
      # NOTE(review): index 5 of the blockdev_find result appears to be
      # the "degraded" flag — confirm against the rpc/backend definition
      is_degr = self.rpc.call_blockdev_find(instance.primary_node, dev)[5]
      if is_degr:
        raise errors.OpExecError("DRBD device %s is degraded!" % name)

    # Step: remove old storage
    self.proc.LogStep(6, steps_total, "removing old storage")
    for name, (dev, old_lvs, new_lvs) in iv_names.iteritems():
      info("remove logical volumes for %s" % name)
      for lv in old_lvs:
        cfg.SetDiskID(lv, tgt_node)
        # removal is best-effort: warn and keep going on failure
        if not self.rpc.call_blockdev_remove(tgt_node, lv):
          warning("Can't remove old LV", hint="manually remove unused LVs")
          continue
4039 a9e0c397 Iustin Pop
4040 a9e0c397 Iustin Pop
  def _ExecD8Secondary(self, feedback_fn):
4041 a9e0c397 Iustin Pop
    """Replace the secondary node for drbd8.
4042 a9e0c397 Iustin Pop

4043 a9e0c397 Iustin Pop
    The algorithm for replace is quite complicated:
4044 a9e0c397 Iustin Pop
      - for all disks of the instance:
4045 a9e0c397 Iustin Pop
        - create new LVs on the new node with same names
4046 a9e0c397 Iustin Pop
        - shutdown the drbd device on the old secondary
4047 a9e0c397 Iustin Pop
        - disconnect the drbd network on the primary
4048 a9e0c397 Iustin Pop
        - create the drbd device on the new secondary
4049 a9e0c397 Iustin Pop
        - network attach the drbd on the primary, using an artifice:
4050 a9e0c397 Iustin Pop
          the drbd code for Attach() will connect to the network if it
4051 a9e0c397 Iustin Pop
          finds a device which is connected to the good local disks but
4052 a9e0c397 Iustin Pop
          not network enabled
4053 a9e0c397 Iustin Pop
      - wait for sync across all devices
4054 a9e0c397 Iustin Pop
      - remove all disks from the old secondary
4055 a9e0c397 Iustin Pop

4056 a9e0c397 Iustin Pop
    Failures are not very well handled.
4057 0834c866 Iustin Pop

4058 a9e0c397 Iustin Pop
    """
4059 0834c866 Iustin Pop
    steps_total = 6
4060 5bfac263 Iustin Pop
    warning, info = (self.proc.LogWarning, self.proc.LogInfo)
4061 a9e0c397 Iustin Pop
    instance = self.instance
4062 a9e0c397 Iustin Pop
    iv_names = {}
4063 a9e0c397 Iustin Pop
    vgname = self.cfg.GetVGName()
4064 a9e0c397 Iustin Pop
    # start of work
4065 a9e0c397 Iustin Pop
    cfg = self.cfg
4066 a9e0c397 Iustin Pop
    old_node = self.tgt_node
4067 a9e0c397 Iustin Pop
    new_node = self.new_node
4068 a9e0c397 Iustin Pop
    pri_node = instance.primary_node
4069 0834c866 Iustin Pop
4070 0834c866 Iustin Pop
    # Step: check device activation
4071 5bfac263 Iustin Pop
    self.proc.LogStep(1, steps_total, "check device existence")
4072 0834c866 Iustin Pop
    info("checking volume groups")
4073 0834c866 Iustin Pop
    my_vg = cfg.GetVGName()
4074 72737a7f Iustin Pop
    results = self.rpc.call_vg_list([pri_node, new_node])
4075 0834c866 Iustin Pop
    if not results:
4076 0834c866 Iustin Pop
      raise errors.OpExecError("Can't list volume groups on the nodes")
4077 0834c866 Iustin Pop
    for node in pri_node, new_node:
4078 0834c866 Iustin Pop
      res = results.get(node, False)
4079 0834c866 Iustin Pop
      if not res or my_vg not in res:
4080 0834c866 Iustin Pop
        raise errors.OpExecError("Volume group '%s' not found on %s" %
4081 0834c866 Iustin Pop
                                 (my_vg, node))
4082 0834c866 Iustin Pop
    for dev in instance.disks:
4083 0834c866 Iustin Pop
      if not dev.iv_name in self.op.disks:
4084 0834c866 Iustin Pop
        continue
4085 0834c866 Iustin Pop
      info("checking %s on %s" % (dev.iv_name, pri_node))
4086 0834c866 Iustin Pop
      cfg.SetDiskID(dev, pri_node)
4087 72737a7f Iustin Pop
      if not self.rpc.call_blockdev_find(pri_node, dev):
4088 0834c866 Iustin Pop
        raise errors.OpExecError("Can't find device %s on node %s" %
4089 0834c866 Iustin Pop
                                 (dev.iv_name, pri_node))
4090 0834c866 Iustin Pop
4091 0834c866 Iustin Pop
    # Step: check other node consistency
4092 5bfac263 Iustin Pop
    self.proc.LogStep(2, steps_total, "check peer consistency")
4093 0834c866 Iustin Pop
    for dev in instance.disks:
4094 0834c866 Iustin Pop
      if not dev.iv_name in self.op.disks:
4095 0834c866 Iustin Pop
        continue
4096 0834c866 Iustin Pop
      info("checking %s consistency on %s" % (dev.iv_name, pri_node))
4097 b9bddb6b Iustin Pop
      if not _CheckDiskConsistency(self, dev, pri_node, True, ldisk=True):
4098 0834c866 Iustin Pop
        raise errors.OpExecError("Primary node (%s) has degraded storage,"
4099 0834c866 Iustin Pop
                                 " unsafe to replace the secondary" %
4100 0834c866 Iustin Pop
                                 pri_node)
4101 0834c866 Iustin Pop
4102 0834c866 Iustin Pop
    # Step: create new storage
4103 5bfac263 Iustin Pop
    self.proc.LogStep(3, steps_total, "allocate new storage")
4104 468b46f9 Iustin Pop
    for dev in instance.disks:
4105 a9e0c397 Iustin Pop
      size = dev.size
4106 0834c866 Iustin Pop
      info("adding new local storage on %s for %s" % (new_node, dev.iv_name))
4107 a9e0c397 Iustin Pop
      # since we *always* want to create this LV, we use the
4108 a9e0c397 Iustin Pop
      # _Create...OnPrimary (which forces the creation), even if we
4109 a9e0c397 Iustin Pop
      # are talking about the secondary node
4110 a9e0c397 Iustin Pop
      for new_lv in dev.children:
4111 b9bddb6b Iustin Pop
        if not _CreateBlockDevOnPrimary(self, new_node, instance, new_lv,
4112 a9e0c397 Iustin Pop
                                        _GetInstanceInfoText(instance)):
4113 a9e0c397 Iustin Pop
          raise errors.OpExecError("Failed to create new LV named '%s' on"
4114 a9e0c397 Iustin Pop
                                   " node '%s'" %
4115 a9e0c397 Iustin Pop
                                   (new_lv.logical_id[1], new_node))
4116 a9e0c397 Iustin Pop
4117 0834c866 Iustin Pop
4118 468b46f9 Iustin Pop
    # Step 4: dbrd minors and drbd setups changes
4119 a1578d63 Iustin Pop
    # after this, we must manually remove the drbd minors on both the
4120 a1578d63 Iustin Pop
    # error and the success paths
4121 a1578d63 Iustin Pop
    minors = cfg.AllocateDRBDMinor([new_node for dev in instance.disks],
4122 a1578d63 Iustin Pop
                                   instance.name)
4123 468b46f9 Iustin Pop
    logging.debug("Allocated minors %s" % (minors,))
4124 5bfac263 Iustin Pop
    self.proc.LogStep(4, steps_total, "changing drbd configuration")
4125 468b46f9 Iustin Pop
    for dev, new_minor in zip(instance.disks, minors):
4126 0834c866 Iustin Pop
      size = dev.size
4127 0834c866 Iustin Pop
      info("activating a new drbd on %s for %s" % (new_node, dev.iv_name))
4128 a9e0c397 Iustin Pop
      # create new devices on new_node
4129 ffa1c0dc Iustin Pop
      if pri_node == dev.logical_id[0]:
4130 ffa1c0dc Iustin Pop
        new_logical_id = (pri_node, new_node,
4131 f9518d38 Iustin Pop
                          dev.logical_id[2], dev.logical_id[3], new_minor,
4132 f9518d38 Iustin Pop
                          dev.logical_id[5])
4133 ffa1c0dc Iustin Pop
      else:
4134 ffa1c0dc Iustin Pop
        new_logical_id = (new_node, pri_node,
4135 f9518d38 Iustin Pop
                          dev.logical_id[2], new_minor, dev.logical_id[4],
4136 f9518d38 Iustin Pop
                          dev.logical_id[5])
4137 468b46f9 Iustin Pop
      iv_names[dev.iv_name] = (dev, dev.children, new_logical_id)
4138 a1578d63 Iustin Pop
      logging.debug("Allocated new_minor: %s, new_logical_id: %s", new_minor,
4139 a1578d63 Iustin Pop
                    new_logical_id)
4140 a9e0c397 Iustin Pop
      new_drbd = objects.Disk(dev_type=constants.LD_DRBD8,
4141 ffa1c0dc Iustin Pop
                              logical_id=new_logical_id,
4142 a9e0c397 Iustin Pop
                              children=dev.children)
4143 b9bddb6b Iustin Pop
      if not _CreateBlockDevOnSecondary(self, new_node, instance,
4144 3f78eef2 Iustin Pop
                                        new_drbd, False,
4145 b9bddb6b Iustin Pop
                                        _GetInstanceInfoText(instance)):
4146 a1578d63 Iustin Pop
        self.cfg.ReleaseDRBDMinors(instance.name)
4147 a9e0c397 Iustin Pop
        raise errors.OpExecError("Failed to create new DRBD on"
4148 a9e0c397 Iustin Pop
                                 " node '%s'" % new_node)
4149 a9e0c397 Iustin Pop
4150 0834c866 Iustin Pop
    for dev in instance.disks:
4151 a9e0c397 Iustin Pop
      # we have new devices, shutdown the drbd on the old secondary
4152 0834c866 Iustin Pop
      info("shutting down drbd for %s on old node" % dev.iv_name)
4153 a9e0c397 Iustin Pop
      cfg.SetDiskID(dev, old_node)
4154 72737a7f Iustin Pop
      if not self.rpc.call_blockdev_shutdown(old_node, dev):
4155 0834c866 Iustin Pop
        warning("Failed to shutdown drbd for %s on old node" % dev.iv_name,
4156 79caa9ed Guido Trotter
                hint="Please cleanup this device manually as soon as possible")
4157 a9e0c397 Iustin Pop
4158 642445d9 Iustin Pop
    info("detaching primary drbds from the network (=> standalone)")
4159 642445d9 Iustin Pop
    done = 0
4160 642445d9 Iustin Pop
    for dev in instance.disks:
4161 a9e0c397 Iustin Pop
      cfg.SetDiskID(dev, pri_node)
4162 f9518d38 Iustin Pop
      # set the network part of the physical (unique in bdev terms) id
4163 f9518d38 Iustin Pop
      # to None, meaning detach from network
4164 f9518d38 Iustin Pop
      dev.physical_id = (None, None, None, None) + dev.physical_id[4:]
4165 642445d9 Iustin Pop
      # and 'find' the device, which will 'fix' it to match the
4166 642445d9 Iustin Pop
      # standalone state
4167 72737a7f Iustin Pop
      if self.rpc.call_blockdev_find(pri_node, dev):
4168 642445d9 Iustin Pop
        done += 1
4169 642445d9 Iustin Pop
      else:
4170 642445d9 Iustin Pop
        warning("Failed to detach drbd %s from network, unusual case" %
4171 642445d9 Iustin Pop
                dev.iv_name)
4172 642445d9 Iustin Pop
4173 642445d9 Iustin Pop
    if not done:
4174 642445d9 Iustin Pop
      # no detaches succeeded (very unlikely)
4175 a1578d63 Iustin Pop
      self.cfg.ReleaseDRBDMinors(instance.name)
4176 642445d9 Iustin Pop
      raise errors.OpExecError("Can't detach at least one DRBD from old node")
4177 642445d9 Iustin Pop
4178 642445d9 Iustin Pop
    # if we managed to detach at least one, we update all the disks of
4179 642445d9 Iustin Pop
    # the instance to point to the new secondary
4180 642445d9 Iustin Pop
    info("updating instance configuration")
4181 468b46f9 Iustin Pop
    for dev, _, new_logical_id in iv_names.itervalues():
4182 468b46f9 Iustin Pop
      dev.logical_id = new_logical_id
4183 a9e0c397 Iustin Pop
      cfg.SetDiskID(dev, pri_node)
4184 642445d9 Iustin Pop
    cfg.Update(instance)
4185 a1578d63 Iustin Pop
    # we can remove now the temp minors as now the new values are
4186 a1578d63 Iustin Pop
    # written to the config file (and therefore stable)
4187 a1578d63 Iustin Pop
    self.cfg.ReleaseDRBDMinors(instance.name)
4188 a9e0c397 Iustin Pop
4189 642445d9 Iustin Pop
    # and now perform the drbd attach
4190 642445d9 Iustin Pop
    info("attaching primary drbds to new secondary (standalone => connected)")
4191 642445d9 Iustin Pop
    failures = []
4192 642445d9 Iustin Pop
    for dev in instance.disks:
4193 642445d9 Iustin Pop
      info("attaching primary drbd for %s to new secondary node" % dev.iv_name)
4194 642445d9 Iustin Pop
      # since the attach is smart, it's enough to 'find' the device,
4195 642445d9 Iustin Pop
      # it will automatically activate the network, if the physical_id
4196 642445d9 Iustin Pop
      # is correct
4197 642445d9 Iustin Pop
      cfg.SetDiskID(dev, pri_node)
4198 ffa1c0dc Iustin Pop
      logging.debug("Disk to attach: %s", dev)
4199 72737a7f Iustin Pop
      if not self.rpc.call_blockdev_find(pri_node, dev):
4200 642445d9 Iustin Pop
        warning("can't attach drbd %s to new secondary!" % dev.iv_name,
4201 642445d9 Iustin Pop
                "please do a gnt-instance info to see the status of disks")
4202 a9e0c397 Iustin Pop
4203 a9e0c397 Iustin Pop
    # this can fail as the old devices are degraded and _WaitForSync
4204 a9e0c397 Iustin Pop
    # does a combined result over all disks, so we don't check its
4205 a9e0c397 Iustin Pop
    # return value
4206 5bfac263 Iustin Pop
    self.proc.LogStep(5, steps_total, "sync devices")
4207 b9bddb6b Iustin Pop
    _WaitForSync(self, instance, unlock=True)
4208 a9e0c397 Iustin Pop
4209 a9e0c397 Iustin Pop
    # so check manually all the devices
4210 ffa1c0dc Iustin Pop
    for name, (dev, old_lvs, _) in iv_names.iteritems():
4211 a9e0c397 Iustin Pop
      cfg.SetDiskID(dev, pri_node)
4212 72737a7f Iustin Pop
      is_degr = self.rpc.call_blockdev_find(pri_node, dev)[5]
4213 a9e0c397 Iustin Pop
      if is_degr:
4214 a9e0c397 Iustin Pop
        raise errors.OpExecError("DRBD device %s is degraded!" % name)
4215 a9e0c397 Iustin Pop
4216 5bfac263 Iustin Pop
    self.proc.LogStep(6, steps_total, "removing old storage")
4217 ffa1c0dc Iustin Pop
    for name, (dev, old_lvs, _) in iv_names.iteritems():
4218 0834c866 Iustin Pop
      info("remove logical volumes for %s" % name)
4219 a9e0c397 Iustin Pop
      for lv in old_lvs:
4220 a9e0c397 Iustin Pop
        cfg.SetDiskID(lv, old_node)
4221 72737a7f Iustin Pop
        if not self.rpc.call_blockdev_remove(old_node, lv):
4222 0834c866 Iustin Pop
          warning("Can't remove LV on old secondary",
4223 79caa9ed Guido Trotter
                  hint="Cleanup stale volumes by hand")
4224 a9e0c397 Iustin Pop
4225 a9e0c397 Iustin Pop
  def Exec(self, feedback_fn):
    """Execute disk replacement.

    Picks the handler matching the requested replacement mode and runs
    it; for an instance that is configured down, the disks are
    activated first and shut down again afterwards.

    """
    inst = self.instance
    was_down = (inst.status == "down")

    # a down instance has no active disks, so bring them up for the
    # duration of the replacement
    if was_down:
      _StartInstanceDisks(self, inst, True)

    if inst.disk_template != constants.DT_DRBD8:
      raise errors.ProgrammerError("Unhandled disk replacement case")

    if self.op.remote_node is None:
      handler = self._ExecD8DiskOnly
    else:
      handler = self._ExecD8Secondary

    result = handler(feedback_fn)

    # restore the original (down) state of the instance disks
    if was_down:
      _SafeShutdownInstanceDisks(self, inst)

    return result
4253 a8083063 Iustin Pop
4254 8729e0d7 Iustin Pop
class LUGrowDisk(LogicalUnit):
  """Grow a disk of an instance.

  """
  HPATH = "disk-grow"
  HTYPE = constants.HTYPE_INSTANCE
  _OP_REQP = ["instance_name", "disk", "amount", "wait_for_sync"]
  REQ_BGL = False

  def ExpandNames(self):
    self._ExpandAndLockInstance()
    # node locks are filled in later from the instance's node list
    self.needed_locks[locking.LEVEL_NODE] = []
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE

  def DeclareLocks(self, level):
    if level == locking.LEVEL_NODE:
      self._LockInstancesNodes()

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on the master, the primary and all the secondaries.

    """
    env = {
      "DISK": self.op.disk,
      "AMOUNT": self.op.amount,
      }
    env.update(_BuildInstanceHookEnvByObject(self, self.instance))
    nl = [
      self.cfg.GetMasterNode(),
      self.instance.primary_node,
      ]
    return env, nl, nl

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster, that its disk
    template supports growing, that the requested disk exists, and
    that every node of the instance has enough free space in the
    volume group for the requested amount.

    """
    instance = self.cfg.GetInstanceInfo(self.op.instance_name)
    assert instance is not None, \
      "Cannot retrieve locked instance %s" % self.op.instance_name

    self.instance = instance

    if instance.disk_template not in (constants.DT_PLAIN, constants.DT_DRBD8):
      raise errors.OpPrereqError("Instance's disk layout does not support"
                                 " growing.")

    if instance.FindDisk(self.op.disk) is None:
      raise errors.OpPrereqError("Disk '%s' not found for instance '%s'" %
                                 (self.op.disk, instance.name))

    nodenames = [instance.primary_node] + list(instance.secondary_nodes)
    nodeinfo = self.rpc.call_node_info(nodenames, self.cfg.GetVGName(),
                                       instance.hypervisor)
    for node in nodenames:
      info = nodeinfo.get(node, None)
      if not info:
        raise errors.OpPrereqError("Cannot get current information"
                                   " from node '%s'" % node)
      vg_free = info.get('vg_free', None)
      if not isinstance(vg_free, int):
        raise errors.OpPrereqError("Can't compute free disk space on"
                                   " node %s" % node)
      # reuse the value we just validated instead of re-indexing the dict
      if self.op.amount > vg_free:
        raise errors.OpPrereqError("Not enough disk space on target node %s:"
                                   " %d MiB available, %d MiB required" %
                                   (node, vg_free, self.op.amount))

  def Exec(self, feedback_fn):
    """Execute disk grow.

    Grows the disk on all nodes of the instance, records the new size
    in the configuration, and optionally waits for the resync to
    finish.

    """
    instance = self.instance
    disk = instance.FindDisk(self.op.disk)
    for node in (instance.secondary_nodes + (instance.primary_node,)):
      self.cfg.SetDiskID(disk, node)
      result = self.rpc.call_blockdev_grow(node, disk, self.op.amount)
      # the rpc result must be a (status, payload) pair
      if (not result or not isinstance(result, (list, tuple)) or
          len(result) != 2):
        raise errors.OpExecError("grow request failed to node %s" % node)
      elif not result[0]:
        raise errors.OpExecError("grow request failed to node %s: %s" %
                                 (node, result[1]))
    disk.RecordGrow(self.op.amount)
    self.cfg.Update(instance)
    if self.op.wait_for_sync:
      # _WaitForSync takes the LU as its first argument (see the other
      # callers in this file); the old (cfg, instance, proc) call did
      # not match that signature
      disk_abort = not _WaitForSync(self, instance)
      if disk_abort:
        logger.Error("Warning: disk sync-ing has not returned a good status.\n"
                     " Please check the instance.")
4349 8729e0d7 Iustin Pop
4350 a8083063 Iustin Pop
class LUQueryInstanceData(NoHooksLU):
  """Query runtime instance data.

  """
  _OP_REQP = ["instances", "static"]
  REQ_BGL = False

  def ExpandNames(self):
    self.needed_locks = {}
    # this is a read-only query, so every lock is taken in shared mode
    self.share_locks = dict(((i, 1) for i in locking.LEVELS))

    if not isinstance(self.op.instances, list):
      raise errors.OpPrereqError("Invalid argument type 'instances'")

    if self.op.instances:
      self.wanted_names = []
      for name in self.op.instances:
        full_name = self.cfg.ExpandInstanceName(name)
        if full_name is None:
          # report the name that failed to expand; this opcode carries
          # an 'instances' list and has no 'instance_name' attribute
          raise errors.OpPrereqError("Instance '%s' not known" % name)
        self.wanted_names.append(full_name)
      self.needed_locks[locking.LEVEL_INSTANCE] = self.wanted_names
    else:
      # an empty list means "all instances"
      self.wanted_names = None
      self.needed_locks[locking.LEVEL_INSTANCE] = locking.ALL_SET

    self.needed_locks[locking.LEVEL_NODE] = []
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE

  def DeclareLocks(self, level):
    if level == locking.LEVEL_NODE:
      self._LockInstancesNodes()

  def CheckPrereq(self):
    """Check prerequisites.

    This only checks the optional instance list against the existing names.

    """
    if self.wanted_names is None:
      # "all instances" was requested: the list is whatever we locked
      self.wanted_names = self.acquired_locks[locking.LEVEL_INSTANCE]

    self.wanted_instances = [self.cfg.GetInstanceInfo(name) for name
                             in self.wanted_names]

  def _ComputeDiskStatus(self, instance, snode, dev):
    """Compute block device status.

    Returns a dict describing the device and, recursively, its
    children; unless self.op.static is set, the primary and secondary
    nodes are queried for the live device status (otherwise the
    pstatus/sstatus entries are None).

    """
    static = self.op.static
    if not static:
      self.cfg.SetDiskID(dev, instance.primary_node)
      dev_pstatus = self.rpc.call_blockdev_find(instance.primary_node, dev)
    else:
      dev_pstatus = None

    if dev.dev_type in constants.LDS_DRBD:
      # we change the snode then (otherwise we use the one passed in)
      if dev.logical_id[0] == instance.primary_node:
        snode = dev.logical_id[1]
      else:
        snode = dev.logical_id[0]

    if snode and not static:
      self.cfg.SetDiskID(dev, snode)
      dev_sstatus = self.rpc.call_blockdev_find(snode, dev)
    else:
      dev_sstatus = None

    if dev.children:
      dev_children = [self._ComputeDiskStatus(instance, snode, child)
                      for child in dev.children]
    else:
      dev_children = []

    data = {
      "iv_name": dev.iv_name,
      "dev_type": dev.dev_type,
      "logical_id": dev.logical_id,
      "physical_id": dev.physical_id,
      "pstatus": dev_pstatus,
      "sstatus": dev_sstatus,
      "children": dev_children,
      }

    return data

  def Exec(self, feedback_fn):
    """Gather and return data"""
    result = {}

    cluster = self.cfg.GetClusterInfo()

    for instance in self.wanted_instances:
      if not self.op.static:
        # ask the primary node for the live state of the instance
        remote_info = self.rpc.call_instance_info(instance.primary_node,
                                                  instance.name,
                                                  instance.hypervisor)
        if remote_info and "state" in remote_info:
          remote_state = "up"
        else:
          remote_state = "down"
      else:
        remote_state = None
      if instance.status == "down":
        config_state = "down"
      else:
        config_state = "up"

      disks = [self._ComputeDiskStatus(instance, None, device)
               for device in instance.disks]

      idict = {
        "name": instance.name,
        "config_state": config_state,
        "run_state": remote_state,
        "pnode": instance.primary_node,
        "snodes": instance.secondary_nodes,
        "os": instance.os,
        "nics": [(nic.mac, nic.ip, nic.bridge) for nic in instance.nics],
        "disks": disks,
        "hypervisor": instance.hypervisor,
        "network_port": instance.network_port,
        "hv_instance": instance.hvparams,
        "hv_actual": cluster.FillHV(instance),
        "be_instance": instance.beparams,
        "be_actual": cluster.FillBE(instance),
        }

      result[instance.name] = idict

    return result
4485 a8083063 Iustin Pop
4486 7767bbf5 Manuel Franceschini
class LUSetInstanceParams(LogicalUnit):
4487 a8083063 Iustin Pop
  """Modifies an instances's parameters.
4488 a8083063 Iustin Pop

4489 a8083063 Iustin Pop
  """
4490 a8083063 Iustin Pop
  HPATH = "instance-modify"
4491 a8083063 Iustin Pop
  HTYPE = constants.HTYPE_INSTANCE
4492 74409b12 Iustin Pop
  _OP_REQP = ["instance_name", "hvparams"]
4493 1a5c7281 Guido Trotter
  REQ_BGL = False
4494 1a5c7281 Guido Trotter
4495 1a5c7281 Guido Trotter
  def ExpandNames(self):
4496 1a5c7281 Guido Trotter
    self._ExpandAndLockInstance()
4497 74409b12 Iustin Pop
    self.needed_locks[locking.LEVEL_NODE] = []
4498 74409b12 Iustin Pop
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
4499 74409b12 Iustin Pop
4500 74409b12 Iustin Pop
4501 74409b12 Iustin Pop
  def DeclareLocks(self, level):
4502 74409b12 Iustin Pop
    if level == locking.LEVEL_NODE:
4503 74409b12 Iustin Pop
      self._LockInstancesNodes()
4504 a8083063 Iustin Pop
4505 a8083063 Iustin Pop
  def BuildHooksEnv(self):
4506 a8083063 Iustin Pop
    """Build hooks env.
4507 a8083063 Iustin Pop

4508 a8083063 Iustin Pop
    This runs on the master, primary and secondaries.
4509 a8083063 Iustin Pop

4510 a8083063 Iustin Pop
    """
4511 396e1b78 Michael Hanselmann
    args = dict()
4512 338e51e8 Iustin Pop
    if constants.BE_MEMORY in self.be_new:
4513 338e51e8 Iustin Pop
      args['memory'] = self.be_new[constants.BE_MEMORY]
4514 338e51e8 Iustin Pop
    if constants.BE_VCPUS in self.be_new:
4515 338e51e8 Iustin Pop
      args['vcpus'] = self.be_bnew[constants.BE_VCPUS]
4516 ef756965 Iustin Pop
    if self.do_ip or self.do_bridge or self.mac:
4517 396e1b78 Michael Hanselmann
      if self.do_ip:
4518 396e1b78 Michael Hanselmann
        ip = self.ip
4519 396e1b78 Michael Hanselmann
      else:
4520 396e1b78 Michael Hanselmann
        ip = self.instance.nics[0].ip
4521 396e1b78 Michael Hanselmann
      if self.bridge:
4522 396e1b78 Michael Hanselmann
        bridge = self.bridge
4523 396e1b78 Michael Hanselmann
      else:
4524 396e1b78 Michael Hanselmann
        bridge = self.instance.nics[0].bridge
4525 ef756965 Iustin Pop
      if self.mac:
4526 ef756965 Iustin Pop
        mac = self.mac
4527 ef756965 Iustin Pop
      else:
4528 ef756965 Iustin Pop
        mac = self.instance.nics[0].mac
4529 ef756965 Iustin Pop
      args['nics'] = [(ip, bridge, mac)]
4530 338e51e8 Iustin Pop
    env = _BuildInstanceHookEnvByObject(self, self.instance, override=args)
4531 d6a02168 Michael Hanselmann
    nl = [self.cfg.GetMasterNode(),
4532 a8083063 Iustin Pop
          self.instance.primary_node] + list(self.instance.secondary_nodes)
4533 a8083063 Iustin Pop
    return env, nl, nl
4534 a8083063 Iustin Pop
4535 a8083063 Iustin Pop
  def CheckPrereq(self):
4536 a8083063 Iustin Pop
    """Check prerequisites.
4537 a8083063 Iustin Pop

4538 a8083063 Iustin Pop
    This only checks the instance list against the existing names.
4539 a8083063 Iustin Pop

4540 a8083063 Iustin Pop
    """
4541 1a5c7281 Guido Trotter
    # FIXME: all the parameters could be checked before, in ExpandNames, or in
4542 1a5c7281 Guido Trotter
    # a separate CheckArguments function, if we implement one, so the operation
4543 1a5c7281 Guido Trotter
    # can be aborted without waiting for any lock, should it have an error...
4544 a8083063 Iustin Pop
    self.ip = getattr(self.op, "ip", None)
4545 1862d460 Alexander Schreiber
    self.mac = getattr(self.op, "mac", None)
4546 a8083063 Iustin Pop
    self.bridge = getattr(self.op, "bridge", None)
4547 973d7867 Iustin Pop
    self.kernel_path = getattr(self.op, "kernel_path", None)
4548 973d7867 Iustin Pop
    self.initrd_path = getattr(self.op, "initrd_path", None)
4549 4300c4b6 Guido Trotter
    self.force = getattr(self.op, "force", None)
4550 338e51e8 Iustin Pop
    all_parms = [self.ip, self.bridge, self.mac]
4551 338e51e8 Iustin Pop
    if (all_parms.count(None) == len(all_parms) and
4552 338e51e8 Iustin Pop
        not self.op.hvparams and
4553 338e51e8 Iustin Pop
        not self.op.beparams):
4554 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("No changes submitted")
4555 338e51e8 Iustin Pop
    for item in (constants.BE_MEMORY, constants.BE_VCPUS):
4556 338e51e8 Iustin Pop
      val = self.op.beparams.get(item, None)
4557 338e51e8 Iustin Pop
      if val is not None:
4558 338e51e8 Iustin Pop
        try:
4559 338e51e8 Iustin Pop
          val = int(val)
4560 338e51e8 Iustin Pop
        except ValueError, err:
4561 338e51e8 Iustin Pop
          raise errors.OpPrereqError("Invalid %s size: %s" % (item, str(err)))
4562 338e51e8 Iustin Pop
        self.op.beparams[item] = val
4563 a8083063 Iustin Pop
    if self.ip is not None:
4564 a8083063 Iustin Pop
      self.do_ip = True
4565 a8083063 Iustin Pop
      if self.ip.lower() == "none":
4566 a8083063 Iustin Pop
        self.ip = None
4567 a8083063 Iustin Pop
      else:
4568 a8083063 Iustin Pop
        if not utils.IsValidIP(self.ip):
4569 3ecf6786 Iustin Pop
          raise errors.OpPrereqError("Invalid IP address '%s'." % self.ip)
4570 a8083063 Iustin Pop
    else:
4571 a8083063 Iustin Pop
      self.do_ip = False
4572 ecb215b5 Michael Hanselmann
    self.do_bridge = (self.bridge is not None)
4573 1862d460 Alexander Schreiber
    if self.mac is not None:
4574 1862d460 Alexander Schreiber
      if self.cfg.IsMacInUse(self.mac):
4575 1862d460 Alexander Schreiber
        raise errors.OpPrereqError('MAC address %s already in use in cluster' %
4576 1862d460 Alexander Schreiber
                                   self.mac)
4577 1862d460 Alexander Schreiber
      if not utils.IsValidMac(self.mac):
4578 1862d460 Alexander Schreiber
        raise errors.OpPrereqError('Invalid MAC address %s' % self.mac)
4579 a8083063 Iustin Pop
4580 74409b12 Iustin Pop
    # checking the new params on the primary/secondary nodes
4581 31a853d2 Iustin Pop
4582 cfefe007 Guido Trotter
    instance = self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
4583 1a5c7281 Guido Trotter
    assert self.instance is not None, \
4584 1a5c7281 Guido Trotter
      "Cannot retrieve locked instance %s" % self.op.instance_name
4585 74409b12 Iustin Pop
    pnode = self.instance.primary_node
4586 74409b12 Iustin Pop
    nodelist = [pnode]
4587 74409b12 Iustin Pop
    nodelist.extend(instance.secondary_nodes)
4588 74409b12 Iustin Pop
4589 338e51e8 Iustin Pop
    # hvparams processing
4590 74409b12 Iustin Pop
    if self.op.hvparams:
4591 74409b12 Iustin Pop
      i_hvdict = copy.deepcopy(instance.hvparams)
4592 74409b12 Iustin Pop
      for key, val in self.op.hvparams.iteritems():
4593 74409b12 Iustin Pop
        if val is None:
4594 74409b12 Iustin Pop
          try:
4595 74409b12 Iustin Pop
            del i_hvdict[key]
4596 74409b12 Iustin Pop
          except KeyError:
4597 74409b12 Iustin Pop
            pass
4598 74409b12 Iustin Pop
        else:
4599 74409b12 Iustin Pop
          i_hvdict[key] = val
4600 74409b12 Iustin Pop
      cluster = self.cfg.GetClusterInfo()
4601 74409b12 Iustin Pop
      hv_new = cluster.FillDict(cluster.hvparams[instance.hypervisor],
4602 74409b12 Iustin Pop
                                i_hvdict)
4603 74409b12 Iustin Pop
      # local check
4604 74409b12 Iustin Pop
      hypervisor.GetHypervisor(
4605 74409b12 Iustin Pop
        instance.hypervisor).CheckParameterSyntax(hv_new)
4606 74409b12 Iustin Pop
      _CheckHVParams(self, nodelist, instance.hypervisor, hv_new)
4607 338e51e8 Iustin Pop
      self.hv_new = hv_new # the new actual values
4608 338e51e8 Iustin Pop
      self.hv_inst = i_hvdict # the new dict (without defaults)
4609 338e51e8 Iustin Pop
    else:
4610 338e51e8 Iustin Pop
      self.hv_new = self.hv_inst = {}
4611 338e51e8 Iustin Pop
4612 338e51e8 Iustin Pop
    # beparams processing
4613 338e51e8 Iustin Pop
    if self.op.beparams:
4614 338e51e8 Iustin Pop
      i_bedict = copy.deepcopy(instance.beparams)
4615 338e51e8 Iustin Pop
      for key, val in self.op.beparams.iteritems():
4616 338e51e8 Iustin Pop
        if val is None:
4617 338e51e8 Iustin Pop
          try:
4618 338e51e8 Iustin Pop
            del i_bedict[key]
4619 338e51e8 Iustin Pop
          except KeyError:
4620 338e51e8 Iustin Pop
            pass
4621 338e51e8 Iustin Pop
        else:
4622 338e51e8 Iustin Pop
          i_bedict[key] = val
4623 338e51e8 Iustin Pop
      cluster = self.cfg.GetClusterInfo()
4624 338e51e8 Iustin Pop
      be_new = cluster.FillDict(cluster.beparams[constants.BEGR_DEFAULT],
4625 338e51e8 Iustin Pop
                                i_bedict)
4626 338e51e8 Iustin Pop
      self.be_new = be_new # the new actual values
4627 338e51e8 Iustin Pop
      self.be_inst = i_bedict # the new dict (without defaults)
4628 338e51e8 Iustin Pop
    else:
4629 338e51e8 Iustin Pop
      self.hv_new = self.hv_inst = {}
4630 74409b12 Iustin Pop
4631 cfefe007 Guido Trotter
    self.warn = []
4632 647a5d80 Iustin Pop
4633 338e51e8 Iustin Pop
    if constants.BE_MEMORY in self.op.beparams and not self.force:
4634 647a5d80 Iustin Pop
      mem_check_list = [pnode]
4635 c0f2b229 Iustin Pop
      if be_new[constants.BE_AUTO_BALANCE]:
4636 c0f2b229 Iustin Pop
        # either we changed auto_balance to yes or it was from before
4637 647a5d80 Iustin Pop
        mem_check_list.extend(instance.secondary_nodes)
4638 72737a7f Iustin Pop
      instance_info = self.rpc.call_instance_info(pnode, instance.name,
4639 72737a7f Iustin Pop
                                                  instance.hypervisor)
4640 647a5d80 Iustin Pop
      nodeinfo = self.rpc.call_node_info(mem_check_list, self.cfg.GetVGName(),
4641 72737a7f Iustin Pop
                                         instance.hypervisor)
4642 cfefe007 Guido Trotter
4643 cfefe007 Guido Trotter
      if pnode not in nodeinfo or not isinstance(nodeinfo[pnode], dict):
4644 cfefe007 Guido Trotter
        # Assume the primary node is unreachable and go ahead
4645 cfefe007 Guido Trotter
        self.warn.append("Can't get info from primary node %s" % pnode)
4646 cfefe007 Guido Trotter
      else:
4647 cfefe007 Guido Trotter
        if instance_info:
4648 cfefe007 Guido Trotter
          current_mem = instance_info['memory']
4649 cfefe007 Guido Trotter
        else:
4650 cfefe007 Guido Trotter
          # Assume instance not running
4651 cfefe007 Guido Trotter
          # (there is a slight race condition here, but it's not very probable,
4652 cfefe007 Guido Trotter
          # and we have no other way to check)
4653 cfefe007 Guido Trotter
          current_mem = 0
4654 338e51e8 Iustin Pop
        miss_mem = (be_new[constants.BE_MEMORY] - current_mem -
4655 338e51e8 Iustin Pop
                    nodeinfo[pnode]['memory_free'])
4656 cfefe007 Guido Trotter
        if miss_mem > 0:
4657 cfefe007 Guido Trotter
          raise errors.OpPrereqError("This change will prevent the instance"
4658 cfefe007 Guido Trotter
                                     " from starting, due to %d MB of memory"
4659 cfefe007 Guido Trotter
                                     " missing on its primary node" % miss_mem)
4660 cfefe007 Guido Trotter
4661 c0f2b229 Iustin Pop
      if be_new[constants.BE_AUTO_BALANCE]:
4662 647a5d80 Iustin Pop
        for node in instance.secondary_nodes:
4663 647a5d80 Iustin Pop
          if node not in nodeinfo or not isinstance(nodeinfo[node], dict):
4664 647a5d80 Iustin Pop
            self.warn.append("Can't get info from secondary node %s" % node)
4665 647a5d80 Iustin Pop
          elif be_new[constants.BE_MEMORY] > nodeinfo[node]['memory_free']:
4666 647a5d80 Iustin Pop
            self.warn.append("Not enough memory to failover instance to"
4667 647a5d80 Iustin Pop
                             " secondary node %s" % node)
4668 5bc84f33 Alexander Schreiber
4669 a8083063 Iustin Pop
    return
4670 a8083063 Iustin Pop
4671 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
4672 a8083063 Iustin Pop
    """Modifies an instance.
4673 a8083063 Iustin Pop

4674 a8083063 Iustin Pop
    All parameters take effect only at the next restart of the instance.
4675 a8083063 Iustin Pop
    """
4676 cfefe007 Guido Trotter
    # Process here the warnings from CheckPrereq, as we don't have a
4677 cfefe007 Guido Trotter
    # feedback_fn there.
4678 cfefe007 Guido Trotter
    for warn in self.warn:
4679 cfefe007 Guido Trotter
      feedback_fn("WARNING: %s" % warn)
4680 cfefe007 Guido Trotter
4681 a8083063 Iustin Pop
    result = []
4682 a8083063 Iustin Pop
    instance = self.instance
4683 a8083063 Iustin Pop
    if self.do_ip:
4684 a8083063 Iustin Pop
      instance.nics[0].ip = self.ip
4685 a8083063 Iustin Pop
      result.append(("ip", self.ip))
4686 a8083063 Iustin Pop
    if self.bridge:
4687 a8083063 Iustin Pop
      instance.nics[0].bridge = self.bridge
4688 a8083063 Iustin Pop
      result.append(("bridge", self.bridge))
4689 1862d460 Alexander Schreiber
    if self.mac:
4690 1862d460 Alexander Schreiber
      instance.nics[0].mac = self.mac
4691 1862d460 Alexander Schreiber
      result.append(("mac", self.mac))
4692 74409b12 Iustin Pop
    if self.op.hvparams:
4693 74409b12 Iustin Pop
      instance.hvparams = self.hv_new
4694 74409b12 Iustin Pop
      for key, val in self.op.hvparams.iteritems():
4695 74409b12 Iustin Pop
        result.append(("hv/%s" % key, val))
4696 338e51e8 Iustin Pop
    if self.op.beparams:
4697 338e51e8 Iustin Pop
      instance.beparams = self.be_inst
4698 338e51e8 Iustin Pop
      for key, val in self.op.beparams.iteritems():
4699 338e51e8 Iustin Pop
        result.append(("be/%s" % key, val))
4700 a8083063 Iustin Pop
4701 ea94e1cd Guido Trotter
    self.cfg.Update(instance)
4702 a8083063 Iustin Pop
4703 a8083063 Iustin Pop
    return result
4704 a8083063 Iustin Pop
4705 a8083063 Iustin Pop
4706 a8083063 Iustin Pop
class LUQueryExports(NoHooksLU):
  """Query the per-node list of instance exports.

  """
  _OP_REQP = ['nodes']
  REQ_BGL = False

  def ExpandNames(self):
    # Node locks are taken shared: we only read the export lists.
    self.needed_locks = {}
    self.share_locks[locking.LEVEL_NODE] = 1
    if self.op.nodes:
      wanted = _GetWantedNodes(self, self.op.nodes)
    else:
      # no nodes were given: query every node in the cluster
      wanted = locking.ALL_SET
    self.needed_locks[locking.LEVEL_NODE] = wanted

  def CheckPrereq(self):
    """Check prerequisites.

    The node list is taken from the locks that were actually acquired.

    """
    self.nodes = self.acquired_locks[locking.LEVEL_NODE]

  def Exec(self, feedback_fn):
    """Compute the list of all the exported system images.

    Returns:
      a dictionary with the structure node->(export-list)
      where export-list is a list of the instances exported on
      that node.

    """
    return self.rpc.call_export_list(self.nodes)
4738 a8083063 Iustin Pop
4739 a8083063 Iustin Pop
4740 a8083063 Iustin Pop
class LUExportInstance(LogicalUnit):
  """Export an instance to an image in the cluster.

  The instance's disks are snapshotted on the primary node, the
  snapshots are copied to the target node, and any older export of the
  same instance on other nodes is removed.

  """
  HPATH = "instance-export"
  HTYPE = constants.HTYPE_INSTANCE
  _OP_REQP = ["instance_name", "target_node", "shutdown"]
  REQ_BGL = False

  def ExpandNames(self):
    self._ExpandAndLockInstance()
    # FIXME: lock only instance primary and destination node
    #
    # Sad but true, for now we have to lock all nodes, as we don't know where
    # the previous export might be, and in this LU we search for it and
    # remove it from its current node. In the future we could fix this by:
    #  - making a tasklet to search (share-lock all), then create the new one,
    #    then one to remove, after
    #  - removing the removal operation altogether
    self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET

  def DeclareLocks(self, level):
    """Last minute lock declaration."""
    # All nodes are locked anyway, so nothing to do here.

  def BuildHooksEnv(self):
    """Build hooks env.

    This will run on the master, primary node and target node.

    """
    env = {
      "EXPORT_NODE": self.op.target_node,
      "EXPORT_DO_SHUTDOWN": self.op.shutdown,
      }
    env.update(_BuildInstanceHookEnvByObject(self, self.instance))
    nl = [self.cfg.GetMasterNode(), self.instance.primary_node,
          self.op.target_node]
    return env, nl, nl

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance and node names are valid.

    """
    instance_name = self.op.instance_name
    self.instance = self.cfg.GetInstanceInfo(instance_name)
    # the instance is locked by ExpandNames, so it must exist
    assert self.instance is not None, \
          "Cannot retrieve locked instance %s" % self.op.instance_name

    self.dst_node = self.cfg.GetNodeInfo(
      self.cfg.ExpandNodeName(self.op.target_node))

    assert self.dst_node is not None, \
          "Cannot retrieve locked node %s" % self.op.target_node

    # instance disk type verification: snapshots are LVM-based, so
    # file-backed disks cannot be exported
    for disk in self.instance.disks:
      if disk.dev_type == constants.LD_FILE:
        raise errors.OpPrereqError("Export not supported for instances with"
                                   " file-based disks")

  def Exec(self, feedback_fn):
    """Export an instance to an image in the cluster.

    """
    instance = self.instance
    dst_node = self.dst_node
    src_node = instance.primary_node
    if self.op.shutdown:
      # shutdown the instance, but not the disks
      if not self.rpc.call_instance_shutdown(src_node, instance):
        raise errors.OpExecError("Could not shutdown instance %s on node %s" %
                                 (instance.name, src_node))

    vgname = self.cfg.GetVGName()

    # LVM snapshot devices created on the source node
    snap_disks = []

    try:
      # NOTE(review): only the "sda" disk is snapshotted here; other
      # disks are silently skipped — confirm this is intended.
      for disk in instance.disks:
        if disk.iv_name == "sda":
          # new_dev_name will be a snapshot of an lvm leaf of the one we passed
          new_dev_name = self.rpc.call_blockdev_snapshot(src_node, disk)

          if not new_dev_name:
            # best-effort: log and continue without this disk
            logger.Error("could not snapshot block device %s on node %s" %
                         (disk.logical_id[1], src_node))
          else:
            new_dev = objects.Disk(dev_type=constants.LD_LV, size=disk.size,
                                      logical_id=(vgname, new_dev_name),
                                      physical_id=(vgname, new_dev_name),
                                      iv_name=disk.iv_name)
            snap_disks.append(new_dev)

    finally:
      # restart the instance if we shut it down above; instance.status
      # presumably still holds the pre-shutdown config state — verify
      if self.op.shutdown and instance.status == "up":
        if not self.rpc.call_instance_start(src_node, instance, None):
          _ShutdownInstanceDisks(self, instance)
          raise errors.OpExecError("Could not start instance")

    # TODO: check for size

    # copy each snapshot to the destination node, then drop the
    # snapshot from the source node; failures are logged, not fatal
    cluster_name = self.cfg.GetClusterName()
    for dev in snap_disks:
      if not self.rpc.call_snapshot_export(src_node, dev, dst_node.name,
                                      instance, cluster_name):
        logger.Error("could not export block device %s from node %s to node %s"
                     % (dev.logical_id[1], src_node, dst_node.name))
      if not self.rpc.call_blockdev_remove(src_node, dev):
        logger.Error("could not remove snapshot block device %s from node %s" %
                     (dev.logical_id[1], src_node))

    # write the export metadata on the destination node
    if not self.rpc.call_finalize_export(dst_node.name, instance, snap_disks):
      logger.Error("could not finalize export for instance %s on node %s" %
                   (instance.name, dst_node.name))

    nodelist = self.cfg.GetNodeList()
    nodelist.remove(dst_node.name)

    # on one-node clusters nodelist will be empty after the removal
    # if we proceed the backup would be removed because OpQueryExports
    # substitutes an empty list with the full cluster node list.
    if nodelist:
      exportlist = self.rpc.call_export_list(nodelist)
      for node in exportlist:
        if instance.name in exportlist[node]:
          if not self.rpc.call_export_remove(node, instance.name):
            logger.Error("could not remove older export for instance %s"
                         " on node %s" % (instance.name, node))
4871 5c947f38 Iustin Pop
4872 5c947f38 Iustin Pop
4873 9ac99fda Guido Trotter
class LURemoveExport(NoHooksLU):
  """Remove exports related to the named instance.

  """
  _OP_REQP = ["instance_name"]
  REQ_BGL = False

  def ExpandNames(self):
    # The export may live on any node, so all nodes must be locked; the
    # instance itself is left unlocked since nothing happens to it (it
    # may even have been removed already).
    self.needed_locks = {}
    self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET

  def CheckPrereq(self):
    """Check prerequisites.
    """
    pass

  def Exec(self, feedback_fn):
    """Remove any export.

    """
    iname = self.cfg.ExpandInstanceName(self.op.instance_name)
    # If the instance was not found we'll try with the name that was passed in.
    # This will only work if it was an FQDN, though.
    fqdn_warn = not iname
    if fqdn_warn:
      iname = self.op.instance_name

    locked_nodes = self.acquired_locks[locking.LEVEL_NODE]
    exportlist = self.rpc.call_export_list(locked_nodes)
    found = False
    for node in exportlist:
      if iname not in exportlist[node]:
        continue
      found = True
      if not self.rpc.call_export_remove(node, iname):
        logger.Error("could not remove export for instance %s"
                     " on node %s" % (iname, node))

    if fqdn_warn and not found:
      feedback_fn("Export not found. If trying to remove an export belonging"
                  " to a deleted instance please use its Fully Qualified"
                  " Domain Name.")
4918 9ac99fda Guido Trotter
4919 9ac99fda Guido Trotter
4920 5c947f38 Iustin Pop
class TagsLU(NoHooksLU):
  """Generic tags LU.

  This is an abstract class which is the parent of all the other tags LUs.

  """

  def ExpandNames(self):
    self.needed_locks = {}
    kind = self.op.kind
    if kind == constants.TAG_NODE:
      expanded = self.cfg.ExpandNodeName(self.op.name)
      if expanded is None:
        raise errors.OpPrereqError("Invalid node name (%s)" %
                                   (self.op.name,))
      # store the expanded (canonical) name back in the opcode and lock it
      self.op.name = expanded
      self.needed_locks[locking.LEVEL_NODE] = expanded
    elif kind == constants.TAG_INSTANCE:
      expanded = self.cfg.ExpandInstanceName(self.op.name)
      if expanded is None:
        raise errors.OpPrereqError("Invalid instance name (%s)" %
                                   (self.op.name,))
      self.op.name = expanded
      self.needed_locks[locking.LEVEL_INSTANCE] = expanded

  def CheckPrereq(self):
    """Check prerequisites.

    Resolves the target object (cluster, node or instance) whose tags
    will be operated on, storing it in self.target.

    """
    kind = self.op.kind
    if kind == constants.TAG_CLUSTER:
      self.target = self.cfg.GetClusterInfo()
    elif kind == constants.TAG_NODE:
      self.target = self.cfg.GetNodeInfo(self.op.name)
    elif kind == constants.TAG_INSTANCE:
      self.target = self.cfg.GetInstanceInfo(self.op.name)
    else:
      raise errors.OpPrereqError("Wrong tag type requested (%s)" %
                                 str(self.op.kind))
4957 5c947f38 Iustin Pop
4958 5c947f38 Iustin Pop
4959 5c947f38 Iustin Pop
class LUGetTags(TagsLU):
  """Returns the tags of a given object.

  """
  _OP_REQP = ["kind", "name"]
  REQ_BGL = False

  def Exec(self, feedback_fn):
    """Return the tags of the target object as a list.

    """
    tags = self.target.GetTags()
    return list(tags)
4971 5c947f38 Iustin Pop
4972 5c947f38 Iustin Pop
4973 73415719 Iustin Pop
class LUSearchTags(NoHooksLU):
4974 73415719 Iustin Pop
  """Searches the tags for a given pattern.
4975 73415719 Iustin Pop

4976 73415719 Iustin Pop
  """
4977 73415719 Iustin Pop
  _OP_REQP = ["pattern"]
4978 8646adce Guido Trotter
  REQ_BGL = False
4979 8646adce Guido Trotter
4980 8646adce Guido Trotter
  def ExpandNames(self):
4981 8646adce Guido Trotter
    self.needed_locks = {}
4982 73415719 Iustin Pop
4983 73415719 Iustin Pop
  def CheckPrereq(self):
4984 73415719 Iustin Pop
    """Check prerequisites.
4985 73415719 Iustin Pop

4986 73415719 Iustin Pop
    This checks the pattern passed for validity by compiling it.
4987 73415719 Iustin Pop

4988 73415719 Iustin Pop
    """
4989 73415719 Iustin Pop
    try:
4990 73415719 Iustin Pop
      self.re = re.compile(self.op.pattern)
4991 73415719 Iustin Pop
    except re.error, err:
4992 73415719 Iustin Pop
      raise errors.OpPrereqError("Invalid search pattern '%s': %s" %
4993 73415719 Iustin Pop
                                 (self.op.pattern, err))
4994 73415719 Iustin Pop
4995 73415719 Iustin Pop
  def Exec(self, feedback_fn):
4996 73415719 Iustin Pop
    """Returns the tag list.
4997 73415719 Iustin Pop

4998 73415719 Iustin Pop
    """
4999 73415719 Iustin Pop
    cfg = self.cfg
5000 73415719 Iustin Pop
    tgts = [("/cluster", cfg.GetClusterInfo())]
5001 8646adce Guido Trotter
    ilist = cfg.GetAllInstancesInfo().values()
5002 73415719 Iustin Pop
    tgts.extend([("/instances/%s" % i.name, i) for i in ilist])
5003 8646adce Guido Trotter
    nlist = cfg.GetAllNodesInfo().values()
5004 73415719 Iustin Pop
    tgts.extend([("/nodes/%s" % n.name, n) for n in nlist])
5005 73415719 Iustin Pop
    results = []
5006 73415719 Iustin Pop
    for path, target in tgts:
5007 73415719 Iustin Pop
      for tag in target.GetTags():
5008 73415719 Iustin Pop
        if self.re.search(tag):
5009 73415719 Iustin Pop
          results.append((path, tag))
5010 73415719 Iustin Pop
    return results
5011 73415719 Iustin Pop
5012 73415719 Iustin Pop
5013 f27302fa Iustin Pop
class LUAddTags(TagsLU):
5014 5c947f38 Iustin Pop
  """Sets a tag on a given object.
5015 5c947f38 Iustin Pop

5016 5c947f38 Iustin Pop
  """
5017 f27302fa Iustin Pop
  _OP_REQP = ["kind", "name", "tags"]
5018 8646adce Guido Trotter
  REQ_BGL = False
5019 5c947f38 Iustin Pop
5020 5c947f38 Iustin Pop
  def CheckPrereq(self):
5021 5c947f38 Iustin Pop
    """Check prerequisites.
5022 5c947f38 Iustin Pop

5023 5c947f38 Iustin Pop
    This checks the type and length of the tag name and value.
5024 5c947f38 Iustin Pop

5025 5c947f38 Iustin Pop
    """
5026 5c947f38 Iustin Pop
    TagsLU.CheckPrereq(self)
5027 f27302fa Iustin Pop
    for tag in self.op.tags:
5028 f27302fa Iustin Pop
      objects.TaggableObject.ValidateTag(tag)
5029 5c947f38 Iustin Pop
5030 5c947f38 Iustin Pop
  def Exec(self, feedback_fn):
5031 5c947f38 Iustin Pop
    """Sets the tag.
5032 5c947f38 Iustin Pop

5033 5c947f38 Iustin Pop
    """
5034 5c947f38 Iustin Pop
    try:
5035 f27302fa Iustin Pop
      for tag in self.op.tags:
5036 f27302fa Iustin Pop
        self.target.AddTag(tag)
5037 5c947f38 Iustin Pop
    except errors.TagError, err:
5038 3ecf6786 Iustin Pop
      raise errors.OpExecError("Error while setting tag: %s" % str(err))
5039 5c947f38 Iustin Pop
    try:
5040 5c947f38 Iustin Pop
      self.cfg.Update(self.target)
5041 5c947f38 Iustin Pop
    except errors.ConfigurationError:
5042 3ecf6786 Iustin Pop
      raise errors.OpRetryError("There has been a modification to the"
5043 3ecf6786 Iustin Pop
                                " config file and the operation has been"
5044 3ecf6786 Iustin Pop
                                " aborted. Please retry.")
5045 5c947f38 Iustin Pop
5046 5c947f38 Iustin Pop
5047 f27302fa Iustin Pop
class LUDelTags(TagsLU):
  """Delete a list of tags from a given object.

  """
  _OP_REQP = ["kind", "name", "tags"]
  REQ_BGL = False

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that every requested tag is present on the target.

    """
    TagsLU.CheckPrereq(self)
    for del_tag in self.op.tags:
      objects.TaggableObject.ValidateTag(del_tag)
    wanted = frozenset(self.op.tags)
    present = self.target.GetTags()
    missing = wanted - present
    if missing:
      quoted = sorted(["'%s'" % del_tag for del_tag in missing])
      raise errors.OpPrereqError("Tag(s) %s not found" %
                                 (",".join(quoted)))

  def Exec(self, feedback_fn):
    """Remove the tags from the object and save the configuration.

    """
    for del_tag in self.op.tags:
      self.target.RemoveTag(del_tag)
    try:
      self.cfg.Update(self.target)
    except errors.ConfigurationError:
      # the config changed under us between CheckPrereq and here
      raise errors.OpRetryError("There has been a modification to the"
                                " config file and the operation has been"
                                " aborted. Please retry.")
5084 06009e27 Iustin Pop
5085 0eed6e61 Guido Trotter
5086 06009e27 Iustin Pop
class LUTestDelay(NoHooksLU):
  """Sleep for a specified amount of time.

  This LU sleeps on the master and/or nodes for a specified amount of
  time.

  """
  _OP_REQP = ["duration", "on_master", "on_nodes"]
  REQ_BGL = False

  def ExpandNames(self):
    """Expand names and set required locks.

    This expands the node list, if any.

    """
    self.needed_locks = {}
    if self.op.on_nodes:
      # _GetWantedNodes can be used here, but is not always appropriate to use
      # this way in ExpandNames. Check LogicalUnit.ExpandNames docstring for
      # more information.
      self.op.on_nodes = _GetWantedNodes(self, self.op.on_nodes)
      self.needed_locks[locking.LEVEL_NODE] = self.op.on_nodes

  def CheckPrereq(self):
    """Check prerequisites.

    """

  def Exec(self, feedback_fn):
    """Do the actual sleep.

    """
    duration = self.op.duration
    if self.op.on_master:
      if not utils.TestDelay(duration):
        raise errors.OpExecError("Error during master delay test")
    if self.op.on_nodes:
      node_results = self.rpc.call_test_delay(self.op.on_nodes, duration)
      if not node_results:
        raise errors.OpExecError("Complete failure from rpc call")
      # every node must have reported success
      for node_name, delay_ok in node_results.items():
        if not delay_ok:
          raise errors.OpExecError("Failure during rpc call to node %s,"
                                   " result: %s" % (node_name, delay_ok))
5130 d61df03e Iustin Pop
5131 d61df03e Iustin Pop
5132 d1c2dd75 Iustin Pop
class IAllocator(object):
5133 d1c2dd75 Iustin Pop
  """IAllocator framework.
5134 d61df03e Iustin Pop

5135 d1c2dd75 Iustin Pop
  An IAllocator instance has three sets of attributes:
5136 d6a02168 Michael Hanselmann
    - cfg that is needed to query the cluster
5137 d1c2dd75 Iustin Pop
    - input data (all members of the _KEYS class attribute are required)
5138 d1c2dd75 Iustin Pop
    - four buffer attributes (in|out_data|text), that represent the
5139 d1c2dd75 Iustin Pop
      input (to the external script) in text and data structure format,
5140 d1c2dd75 Iustin Pop
      and the output from it, again in two formats
5141 d1c2dd75 Iustin Pop
    - the result variables from the script (success, info, nodes) for
5142 d1c2dd75 Iustin Pop
      easy usage
5143 d61df03e Iustin Pop

5144 d61df03e Iustin Pop
  """
5145 29859cb7 Iustin Pop
  _ALLO_KEYS = [
5146 d1c2dd75 Iustin Pop
    "mem_size", "disks", "disk_template",
5147 d1c2dd75 Iustin Pop
    "os", "tags", "nics", "vcpus",
5148 d1c2dd75 Iustin Pop
    ]
5149 29859cb7 Iustin Pop
  _RELO_KEYS = [
5150 29859cb7 Iustin Pop
    "relocate_from",
5151 29859cb7 Iustin Pop
    ]
5152 d1c2dd75 Iustin Pop
5153 72737a7f Iustin Pop
  def __init__(self, lu, mode, name, **kwargs):
5154 72737a7f Iustin Pop
    self.lu = lu
5155 d1c2dd75 Iustin Pop
    # init buffer variables
5156 d1c2dd75 Iustin Pop
    self.in_text = self.out_text = self.in_data = self.out_data = None
5157 d1c2dd75 Iustin Pop
    # init all input fields so that pylint is happy
5158 29859cb7 Iustin Pop
    self.mode = mode
5159 29859cb7 Iustin Pop
    self.name = name
5160 d1c2dd75 Iustin Pop
    self.mem_size = self.disks = self.disk_template = None
5161 d1c2dd75 Iustin Pop
    self.os = self.tags = self.nics = self.vcpus = None
5162 29859cb7 Iustin Pop
    self.relocate_from = None
5163 27579978 Iustin Pop
    # computed fields
5164 27579978 Iustin Pop
    self.required_nodes = None
5165 d1c2dd75 Iustin Pop
    # init result fields
5166 d1c2dd75 Iustin Pop
    self.success = self.info = self.nodes = None
5167 29859cb7 Iustin Pop
    if self.mode == constants.IALLOCATOR_MODE_ALLOC:
5168 29859cb7 Iustin Pop
      keyset = self._ALLO_KEYS
5169 29859cb7 Iustin Pop
    elif self.mode == constants.IALLOCATOR_MODE_RELOC:
5170 29859cb7 Iustin Pop
      keyset = self._RELO_KEYS
5171 29859cb7 Iustin Pop
    else:
5172 29859cb7 Iustin Pop
      raise errors.ProgrammerError("Unknown mode '%s' passed to the"
5173 29859cb7 Iustin Pop
                                   " IAllocator" % self.mode)
5174 d1c2dd75 Iustin Pop
    for key in kwargs:
5175 29859cb7 Iustin Pop
      if key not in keyset:
5176 d1c2dd75 Iustin Pop
        raise errors.ProgrammerError("Invalid input parameter '%s' to"
5177 d1c2dd75 Iustin Pop
                                     " IAllocator" % key)
5178 d1c2dd75 Iustin Pop
      setattr(self, key, kwargs[key])
5179 29859cb7 Iustin Pop
    for key in keyset:
5180 d1c2dd75 Iustin Pop
      if key not in kwargs:
5181 d1c2dd75 Iustin Pop
        raise errors.ProgrammerError("Missing input parameter '%s' to"
5182 d1c2dd75 Iustin Pop
                                     " IAllocator" % key)
5183 d1c2dd75 Iustin Pop
    self._BuildInputData()
5184 d1c2dd75 Iustin Pop
5185 d1c2dd75 Iustin Pop
  def _ComputeClusterData(self):
5186 d1c2dd75 Iustin Pop
    """Compute the generic allocator input data.
5187 d1c2dd75 Iustin Pop

5188 d1c2dd75 Iustin Pop
    This is the data that is independent of the actual operation.
5189 d1c2dd75 Iustin Pop

5190 d1c2dd75 Iustin Pop
    """
5191 72737a7f Iustin Pop
    cfg = self.lu.cfg
5192 e69d05fd Iustin Pop
    cluster_info = cfg.GetClusterInfo()
5193 d1c2dd75 Iustin Pop
    # cluster data
5194 d1c2dd75 Iustin Pop
    data = {
5195 d1c2dd75 Iustin Pop
      "version": 1,
5196 72737a7f Iustin Pop
      "cluster_name": cfg.GetClusterName(),
5197 e69d05fd Iustin Pop
      "cluster_tags": list(cluster_info.GetTags()),
5198 e69d05fd Iustin Pop
      "enable_hypervisors": list(cluster_info.enabled_hypervisors),
5199 d1c2dd75 Iustin Pop
      # we don't have job IDs
5200 d61df03e Iustin Pop
      }
5201 d61df03e Iustin Pop
5202 338e51e8 Iustin Pop
    i_list = []
5203 338e51e8 Iustin Pop
    cluster = self.cfg.GetClusterInfo()
5204 338e51e8 Iustin Pop
    for iname in cfg.GetInstanceList():
5205 338e51e8 Iustin Pop
      i_obj = cfg.GetInstanceInfo(iname)
5206 338e51e8 Iustin Pop
      i_list.append((i_obj, cluster.FillBE(i_obj)))
5207 6286519f Iustin Pop
5208 d1c2dd75 Iustin Pop
    # node data
5209 d1c2dd75 Iustin Pop
    node_results = {}
5210 d1c2dd75 Iustin Pop
    node_list = cfg.GetNodeList()
5211 e69d05fd Iustin Pop
    # FIXME: here we have only one hypervisor information, but
5212 e69d05fd Iustin Pop
    # instance can belong to different hypervisors
5213 72737a7f Iustin Pop
    node_data = self.lu.rpc.call_node_info(node_list, cfg.GetVGName(),
5214 72737a7f Iustin Pop
                                           cfg.GetHypervisorType())
5215 d1c2dd75 Iustin Pop
    for nname in node_list:
5216 d1c2dd75 Iustin Pop
      ninfo = cfg.GetNodeInfo(nname)
5217 d1c2dd75 Iustin Pop
      if nname not in node_data or not isinstance(node_data[nname], dict):
5218 d1c2dd75 Iustin Pop
        raise errors.OpExecError("Can't get data for node %s" % nname)
5219 d1c2dd75 Iustin Pop
      remote_info = node_data[nname]
5220 b2662e7f Iustin Pop
      for attr in ['memory_total', 'memory_free', 'memory_dom0',
5221 4337cf1b Iustin Pop
                   'vg_size', 'vg_free', 'cpu_total']:
5222 d1c2dd75 Iustin Pop
        if attr not in remote_info:
5223 d1c2dd75 Iustin Pop
          raise errors.OpExecError("Node '%s' didn't return attribute '%s'" %
5224 d1c2dd75 Iustin Pop
                                   (nname, attr))
5225 d1c2dd75 Iustin Pop
        try:
5226 b2662e7f Iustin Pop
          remote_info[attr] = int(remote_info[attr])
5227 d1c2dd75 Iustin Pop
        except ValueError, err:
5228 d1c2dd75 Iustin Pop
          raise errors.OpExecError("Node '%s' returned invalid value for '%s':"
5229 d1c2dd75 Iustin Pop
                                   " %s" % (nname, attr, str(err)))
5230 6286519f Iustin Pop
      # compute memory used by primary instances
5231 6286519f Iustin Pop
      i_p_mem = i_p_up_mem = 0
5232 338e51e8 Iustin Pop
      for iinfo, beinfo in i_list:
5233 6286519f Iustin Pop
        if iinfo.primary_node == nname:
5234 338e51e8 Iustin Pop
          i_p_mem += beinfo[constants.BE_MEMORY]
5235 6286519f Iustin Pop
          if iinfo.status == "up":
5236 338e51e8 Iustin Pop
            i_p_up_mem += beinfo[constants.BE_MEMORY]
5237 6286519f Iustin Pop
5238 b2662e7f Iustin Pop
      # compute memory used by instances
5239 d1c2dd75 Iustin Pop
      pnr = {
5240 d1c2dd75 Iustin Pop
        "tags": list(ninfo.GetTags()),
5241 b2662e7f Iustin Pop
        "total_memory": remote_info['memory_total'],
5242 b2662e7f Iustin Pop
        "reserved_memory": remote_info['memory_dom0'],
5243 b2662e7f Iustin Pop
        "free_memory": remote_info['memory_free'],
5244 6286519f Iustin Pop
        "i_pri_memory": i_p_mem,
5245 6286519f Iustin Pop
        "i_pri_up_memory": i_p_up_mem,
5246 b2662e7f Iustin Pop
        "total_disk": remote_info['vg_size'],
5247 b2662e7f Iustin Pop
        "free_disk": remote_info['vg_free'],
5248 d1c2dd75 Iustin Pop
        "primary_ip": ninfo.primary_ip,
5249 d1c2dd75 Iustin Pop
        "secondary_ip": ninfo.secondary_ip,
5250 4337cf1b Iustin Pop
        "total_cpus": remote_info['cpu_total'],
5251 d1c2dd75 Iustin Pop
        }
5252 d1c2dd75 Iustin Pop
      node_results[nname] = pnr
5253 d1c2dd75 Iustin Pop
    data["nodes"] = node_results
5254 d1c2dd75 Iustin Pop
5255 d1c2dd75 Iustin Pop
    # instance data
5256 d1c2dd75 Iustin Pop
    instance_data = {}
5257 338e51e8 Iustin Pop
    for iinfo, beinfo in i_list:
5258 d1c2dd75 Iustin Pop
      nic_data = [{"mac": n.mac, "ip": n.ip, "bridge": n.bridge}
5259 d1c2dd75 Iustin Pop
                  for n in iinfo.nics]
5260 d1c2dd75 Iustin Pop
      pir = {
5261 d1c2dd75 Iustin Pop
        "tags": list(iinfo.GetTags()),
5262 d1c2dd75 Iustin Pop
        "should_run": iinfo.status == "up",
5263 338e51e8 Iustin Pop
        "vcpus": beinfo[constants.BE_VCPUS],
5264 338e51e8 Iustin Pop
        "memory": beinfo[constants.BE_MEMORY],
5265 d1c2dd75 Iustin Pop
        "os": iinfo.os,
5266 d1c2dd75 Iustin Pop
        "nodes": [iinfo.primary_node] + list(iinfo.secondary_nodes),
5267 d1c2dd75 Iustin Pop
        "nics": nic_data,
5268 d1c2dd75 Iustin Pop
        "disks": [{"size": dsk.size, "mode": "w"} for dsk in iinfo.disks],
5269 d1c2dd75 Iustin Pop
        "disk_template": iinfo.disk_template,
5270 e69d05fd Iustin Pop
        "hypervisor": iinfo.hypervisor,
5271 d1c2dd75 Iustin Pop
        }
5272 768f0a80 Iustin Pop
      instance_data[iinfo.name] = pir
5273 d61df03e Iustin Pop
5274 d1c2dd75 Iustin Pop
    data["instances"] = instance_data
5275 d61df03e Iustin Pop
5276 d1c2dd75 Iustin Pop
    self.in_data = data
5277 d61df03e Iustin Pop
5278 d1c2dd75 Iustin Pop
  def _AddNewInstance(self):
    """Add new instance data to allocator structure.

    Together with _ComputeClusterData this builds the complete input
    structure for an "allocate" request.

    The checks for the completeness of the opcode must have already been
    done.

    """
    if len(self.disks) != 2:
      raise errors.OpExecError("Only two-disk configurations supported")

    # mirrored templates need a node pair, everything else a single node
    if self.disk_template in constants.DTS_NET_MIRROR:
      self.required_nodes = 2
    else:
      self.required_nodes = 1

    disk_space = _ComputeDiskSize(self.disk_template,
                                  self.disks[0]["size"], self.disks[1]["size"])

    self.in_data["request"] = {
      "type": "allocate",
      "name": self.name,
      "disk_template": self.disk_template,
      "tags": self.tags,
      "os": self.os,
      "vcpus": self.vcpus,
      "memory": self.mem_size,
      "disks": self.disks,
      "disk_space_total": disk_space,
      "nics": self.nics,
      "required_nodes": self.required_nodes,
      }
5313 298fe380 Iustin Pop
5314 d1c2dd75 Iustin Pop
  def _AddRelocateInstance(self):
    """Add relocate instance data to allocator structure.

    Together with _ComputeClusterData this builds the complete input
    structure for a "relocate" request.

    The checks for the completeness of the opcode must have already been
    done.

    """
    inst = self.lu.cfg.GetInstanceInfo(self.name)
    if inst is None:
      raise errors.ProgrammerError("Unknown instance '%s' passed to"
                                   " IAllocator" % self.name)

    # relocation only makes sense for network-mirrored disk templates
    if inst.disk_template not in constants.DTS_NET_MIRROR:
      raise errors.OpPrereqError("Can't relocate non-mirrored instances")

    if len(inst.secondary_nodes) != 1:
      raise errors.OpPrereqError("Instance has not exactly one secondary node")

    self.required_nodes = 1

    # NOTE(review): assumes a two-disk instance, like the allocate path;
    # an instance with fewer disks would raise IndexError here — confirm
    # that the two-disk invariant is enforced upstream
    total_space = _ComputeDiskSize(inst.disk_template,
                                   inst.disks[0].size,
                                   inst.disks[1].size)

    self.in_data["request"] = {
      "type": "relocate",
      "name": self.name,
      "disk_space_total": total_space,
      "required_nodes": self.required_nodes,
      "relocate_from": self.relocate_from,
      }
5349 d61df03e Iustin Pop
5350 d1c2dd75 Iustin Pop
  def _BuildInputData(self):
    """Build input data structures.

    Gathers the cluster data, adds the mode-specific request section and
    serializes the result into self.in_text.

    """
    self._ComputeClusterData()

    # pick the request builder matching the allocator mode
    if self.mode == constants.IALLOCATOR_MODE_ALLOC:
      build_request = self._AddNewInstance
    else:
      build_request = self._AddRelocateInstance
    build_request()

    self.in_text = serializer.Dump(self.in_data)
5362 d61df03e Iustin Pop
5363 72737a7f Iustin Pop
  def Run(self, name, validate=True, call_fn=None):
    """Run an instance allocator and return the results.

    This executes the allocator script 'name' on the master node via the
    given RPC function (call_fn defaults to the iallocator runner RPC and
    can be overridden for testing), stores its standard output in
    self.out_text and optionally validates it via _ValidateResult.

    """
    if call_fn is None:
      call_fn = self.lu.rpc.call_iallocator_runner

    # note: removed an unused 'data = self.in_text' local; the input text
    # is passed to the runner directly
    result = call_fn(self.lu.cfg.GetMasterNode(), name, self.in_text)

    # the runner is expected to return (rcode, stdout, stderr, fail)
    if not isinstance(result, (list, tuple)) or len(result) != 4:
      raise errors.OpExecError("Invalid result from master iallocator runner")

    rcode, stdout, stderr, fail = result

    if rcode == constants.IARUN_NOTFOUND:
      raise errors.OpExecError("Can't find allocator '%s'" % name)
    elif rcode == constants.IARUN_FAILURE:
      raise errors.OpExecError("Instance allocator call failed: %s,"
                               " output: %s" % (fail, stdout+stderr))
    self.out_text = stdout
    if validate:
      self._ValidateResult()
5386 298fe380 Iustin Pop
5387 d1c2dd75 Iustin Pop
  def _ValidateResult(self):
5388 d1c2dd75 Iustin Pop
    """Process the allocator results.
5389 538475ca Iustin Pop

5390 d1c2dd75 Iustin Pop
    This will process and if successful save the result in
5391 d1c2dd75 Iustin Pop
    self.out_data and the other parameters.
5392 538475ca Iustin Pop

5393 d1c2dd75 Iustin Pop
    """
5394 d1c2dd75 Iustin Pop
    try:
5395 d1c2dd75 Iustin Pop
      rdict = serializer.Load(self.out_text)
5396 d1c2dd75 Iustin Pop
    except Exception, err:
5397 d1c2dd75 Iustin Pop
      raise errors.OpExecError("Can't parse iallocator results: %s" % str(err))
5398 d1c2dd75 Iustin Pop
5399 d1c2dd75 Iustin Pop
    if not isinstance(rdict, dict):
5400 d1c2dd75 Iustin Pop
      raise errors.OpExecError("Can't parse iallocator results: not a dict")
5401 538475ca Iustin Pop
5402 d1c2dd75 Iustin Pop
    for key in "success", "info", "nodes":
5403 d1c2dd75 Iustin Pop
      if key not in rdict:
5404 d1c2dd75 Iustin Pop
        raise errors.OpExecError("Can't parse iallocator results:"
5405 d1c2dd75 Iustin Pop
                                 " missing key '%s'" % key)
5406 d1c2dd75 Iustin Pop
      setattr(self, key, rdict[key])
5407 538475ca Iustin Pop
5408 d1c2dd75 Iustin Pop
    if not isinstance(rdict["nodes"], list):
5409 d1c2dd75 Iustin Pop
      raise errors.OpExecError("Can't parse iallocator results: 'nodes' key"
5410 d1c2dd75 Iustin Pop
                               " is not a list")
5411 d1c2dd75 Iustin Pop
    self.out_data = rdict
5412 538475ca Iustin Pop
5413 538475ca Iustin Pop
5414 d61df03e Iustin Pop
class LUTestAllocator(NoHooksLU):
  """Run allocator tests.

  This LU runs the allocator tests

  """
  # the opcode must carry all three of these attributes
  _OP_REQP = ["direction", "mode", "name"]

  def CheckPrereq(self):
    """Check prerequisites.

    This checks the opcode parameters depending on the direction and mode
    of the test.

    """
    if self.op.mode == constants.IALLOCATOR_MODE_ALLOC:
      # allocate mode needs the full (new) instance specification
      for attr in ["name", "mem_size", "disks", "disk_template",
                   "os", "tags", "nics", "vcpus"]:
        if not hasattr(self.op, attr):
          raise errors.OpPrereqError("Missing attribute '%s' on opcode input" %
                                     attr)
      # the instance name must NOT already exist in the cluster
      iname = self.cfg.ExpandInstanceName(self.op.name)
      if iname is not None:
        raise errors.OpPrereqError("Instance '%s' already in the cluster" %
                                   iname)
      if not isinstance(self.op.nics, list):
        raise errors.OpPrereqError("Invalid parameter 'nics'")
      # each nic entry must be a dict with mac, ip and bridge keys
      for row in self.op.nics:
        if (not isinstance(row, dict) or
            "mac" not in row or
            "ip" not in row or
            "bridge" not in row):
          raise errors.OpPrereqError("Invalid contents of the"
                                     " 'nics' parameter")
      if not isinstance(self.op.disks, list):
        raise errors.OpPrereqError("Invalid parameter 'disks'")
      if len(self.op.disks) != 2:
        raise errors.OpPrereqError("Only two-disk configurations supported")
      # each disk entry must be a dict with an integer size and 'r'/'w' mode
      for row in self.op.disks:
        if (not isinstance(row, dict) or
            "size" not in row or
            not isinstance(row["size"], int) or
            "mode" not in row or
            row["mode"] not in ['r', 'w']):
          raise errors.OpPrereqError("Invalid contents of the"
                                     " 'disks' parameter")
    elif self.op.mode == constants.IALLOCATOR_MODE_RELOC:
      # relocate mode only needs an existing instance name
      if not hasattr(self.op, "name"):
        raise errors.OpPrereqError("Missing attribute 'name' on opcode input")
      fname = self.cfg.ExpandInstanceName(self.op.name)
      if fname is None:
        raise errors.OpPrereqError("Instance '%s' not found for relocation" %
                                   self.op.name)
      # canonicalize the name and remember the current secondaries as the
      # nodes we are relocating away from
      self.op.name = fname
      self.relocate_from = self.cfg.GetInstanceInfo(fname).secondary_nodes
    else:
      raise errors.OpPrereqError("Invalid test allocator mode '%s'" %
                                 self.op.mode)

    # direction 'out' actually runs an allocator, so its name is required
    if self.op.direction == constants.IALLOCATOR_DIR_OUT:
      if not hasattr(self.op, "allocator") or self.op.allocator is None:
        raise errors.OpPrereqError("Missing allocator name")
    elif self.op.direction != constants.IALLOCATOR_DIR_IN:
      raise errors.OpPrereqError("Wrong allocator test '%s'" %
                                 self.op.direction)

  def Exec(self, feedback_fn):
    """Run the allocator test.

    Builds the IAllocator input for the requested mode and either returns
    the generated input text (direction 'in') or runs the named allocator
    without validation and returns its raw output (direction 'out').

    """
    if self.op.mode == constants.IALLOCATOR_MODE_ALLOC:
      ial = IAllocator(self,
                       mode=self.op.mode,
                       name=self.op.name,
                       mem_size=self.op.mem_size,
                       disks=self.op.disks,
                       disk_template=self.op.disk_template,
                       os=self.op.os,
                       tags=self.op.tags,
                       nics=self.op.nics,
                       vcpus=self.op.vcpus,
                       )
    else:
      ial = IAllocator(self,
                       mode=self.op.mode,
                       name=self.op.name,
                       relocate_from=list(self.relocate_from),
                       )

    if self.op.direction == constants.IALLOCATOR_DIR_IN:
      result = ial.in_text
    else:
      # validate=False: this is a test LU, so pass the raw output through
      ial.Run(self.op.allocator, validate=False)
      result = ial.out_text
    return result