Statistics
| Branch: | Tag: | Revision:

root / lib / cmdlib.py @ d50b3059

History | View | Annotate | Download (193.8 kB)

1 2f31098c Iustin Pop
#
2 a8083063 Iustin Pop
#
3 a8083063 Iustin Pop
4 e7c6e02b Michael Hanselmann
# Copyright (C) 2006, 2007, 2008 Google Inc.
5 a8083063 Iustin Pop
#
6 a8083063 Iustin Pop
# This program is free software; you can redistribute it and/or modify
7 a8083063 Iustin Pop
# it under the terms of the GNU General Public License as published by
8 a8083063 Iustin Pop
# the Free Software Foundation; either version 2 of the License, or
9 a8083063 Iustin Pop
# (at your option) any later version.
10 a8083063 Iustin Pop
#
11 a8083063 Iustin Pop
# This program is distributed in the hope that it will be useful, but
12 a8083063 Iustin Pop
# WITHOUT ANY WARRANTY; without even the implied warranty of
13 a8083063 Iustin Pop
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
14 a8083063 Iustin Pop
# General Public License for more details.
15 a8083063 Iustin Pop
#
16 a8083063 Iustin Pop
# You should have received a copy of the GNU General Public License
17 a8083063 Iustin Pop
# along with this program; if not, write to the Free Software
18 a8083063 Iustin Pop
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
19 a8083063 Iustin Pop
# 02110-1301, USA.
20 a8083063 Iustin Pop
21 a8083063 Iustin Pop
22 880478f8 Iustin Pop
"""Module implementing the master-side code."""
23 a8083063 Iustin Pop
24 a8083063 Iustin Pop
# pylint: disable-msg=W0613,W0201
25 a8083063 Iustin Pop
26 a8083063 Iustin Pop
import os
27 a8083063 Iustin Pop
import os.path
28 a8083063 Iustin Pop
import sha
29 a8083063 Iustin Pop
import time
30 a8083063 Iustin Pop
import tempfile
31 a8083063 Iustin Pop
import re
32 a8083063 Iustin Pop
import platform
33 ffa1c0dc Iustin Pop
import logging
34 74409b12 Iustin Pop
import copy
35 6de7c41d Iustin Pop
import itertools
36 a8083063 Iustin Pop
37 a8083063 Iustin Pop
from ganeti import ssh
38 a8083063 Iustin Pop
from ganeti import utils
39 a8083063 Iustin Pop
from ganeti import errors
40 a8083063 Iustin Pop
from ganeti import hypervisor
41 6048c986 Guido Trotter
from ganeti import locking
42 a8083063 Iustin Pop
from ganeti import constants
43 a8083063 Iustin Pop
from ganeti import objects
44 a8083063 Iustin Pop
from ganeti import opcodes
45 8d14b30d Iustin Pop
from ganeti import serializer
46 d61df03e Iustin Pop
47 d61df03e Iustin Pop
48 a8083063 Iustin Pop
class LogicalUnit(object):
  """Logical Unit base class.

  Subclasses must follow these rules:
    - implement ExpandNames
    - implement CheckPrereq
    - implement Exec
    - implement BuildHooksEnv
    - redefine HPATH and HTYPE
    - optionally redefine their run requirements:
        REQ_MASTER: the LU needs to run on the master node
        REQ_BGL: the LU needs to hold the Big Ganeti Lock exclusively

  Note that all commands require root permissions.

  """
  # HPATH/HTYPE identify the hooks directory and object type; None means
  # the LU runs no hooks (see BuildHooksEnv)
  HPATH = None
  HTYPE = None
  # names of opcode attributes that must be present (and not None)
  _OP_REQP = []
  REQ_MASTER = True
  REQ_BGL = True

  def __init__(self, processor, op, context, rpc):
    """Constructor for LogicalUnit.

    This needs to be overridden in derived classes in order to check op
    validity.

    Raises errors.OpPrereqError if a required opcode parameter is
    missing, the cluster is not initialized, or (when REQ_MASTER is
    set) we are not running on the master node.

    """
    self.proc = processor
    self.op = op
    self.cfg = context.cfg
    self.context = context
    self.rpc = rpc
    # Dicts used to declare locking needs to mcpu
    self.needed_locks = None
    self.acquired_locks = {}
    # per-level shared/exclusive flag; 0 (exclusive) by default
    self.share_locks = dict(((i, 0) for i in locking.LEVELS))
    self.add_locks = {}
    self.remove_locks = {}
    # Used to force good behavior when calling helper functions
    self.recalculate_locks = {}
    # lazily-created SshRunner, accessed through the 'ssh' property
    self.__ssh = None
    # logging
    self.LogWarning = processor.LogWarning
    self.LogInfo = processor.LogInfo

    # validate that all declared-required opcode parameters are present
    for attr_name in self._OP_REQP:
      attr_val = getattr(op, attr_name, None)
      if attr_val is None:
        raise errors.OpPrereqError("Required parameter '%s' missing" %
                                   attr_name)

    if not self.cfg.IsCluster():
      raise errors.OpPrereqError("Cluster not initialized yet,"
                                 " use 'gnt-cluster init' first.")
    if self.REQ_MASTER:
      master = self.cfg.GetMasterNode()
      if master != utils.HostInfo().name:
        raise errors.OpPrereqError("Commands must be run on the master"
                                   " node %s" % master)

  def __GetSSH(self):
    """Returns the SshRunner object

    The runner is created on first use and cached for the lifetime of
    the LU.

    """
    if not self.__ssh:
      self.__ssh = ssh.SshRunner(self.cfg.GetClusterName())
    return self.__ssh

  ssh = property(fget=__GetSSH)

  def ExpandNames(self):
    """Expand names for this LU.

    This method is called before starting to execute the opcode, and it should
    update all the parameters of the opcode to their canonical form (e.g. a
    short node name must be fully expanded after this method has successfully
    completed). This way locking, hooks, logging, etc. can work correctly.

    LUs which implement this method must also populate the self.needed_locks
    member, as a dict with lock levels as keys, and a list of needed lock names
    as values. Rules:
      - Use an empty dict if you don't need any lock
      - If you don't need any lock at a particular level omit that level
      - Don't put anything for the BGL level
      - If you want all locks at a level use locking.ALL_SET as a value

    If you need to share locks (rather than acquire them exclusively) at one
    level you can modify self.share_locks, setting a true value (usually 1) for
    that level. By default locks are not shared.

    Examples:
    # Acquire all nodes and one instance
    self.needed_locks = {
      locking.LEVEL_NODE: locking.ALL_SET,
      locking.LEVEL_INSTANCE: ['instance1.example.tld'],
    }
    # Acquire just two nodes
    self.needed_locks = {
      locking.LEVEL_NODE: ['node1.example.tld', 'node2.example.tld'],
    }
    # Acquire no locks
    self.needed_locks = {} # No, you can't leave it to the default value None

    """
    # The implementation of this method is mandatory only if the new LU is
    # concurrent, so that old LUs don't need to be changed all at the same
    # time.
    if self.REQ_BGL:
      self.needed_locks = {} # Exclusive LUs don't need locks.
    else:
      raise NotImplementedError

  def DeclareLocks(self, level):
    """Declare LU locking needs for a level

    While most LUs can just declare their locking needs at ExpandNames time,
    sometimes there's the need to calculate some locks after having acquired
    the ones before. This function is called just before acquiring locks at a
    particular level, but after acquiring the ones at lower levels, and permits
    such calculations. It can be used to modify self.needed_locks, and by
    default it does nothing.

    This function is only called if you have something already set in
    self.needed_locks for the level.

    @param level: Locking level which is going to be locked
    @type level: member of ganeti.locking.LEVELS

    """

  def CheckPrereq(self):
    """Check prerequisites for this LU.

    This method should check that the prerequisites for the execution
    of this LU are fulfilled. It can do internode communication, but
    it should be idempotent - no cluster or system changes are
    allowed.

    The method should raise errors.OpPrereqError in case something is
    not fulfilled. Its return value is ignored.

    This method should also update all the parameters of the opcode to
    their canonical form if it hasn't been done by ExpandNames before.

    """
    raise NotImplementedError

  def Exec(self, feedback_fn):
    """Execute the LU.

    This method should implement the actual work. It should raise
    errors.OpExecError for failures that are somewhat dealt with in
    code, or expected.

    @param feedback_fn: callable used to report progress back to the caller

    """
    raise NotImplementedError

  def BuildHooksEnv(self):
    """Build hooks environment for this LU.

    This method should return a three-node tuple consisting of: a dict
    containing the environment that will be used for running the
    specific hook for this LU, a list of node names on which the hook
    should run before the execution, and a list of node names on which
    the hook should run after the execution.

    The keys of the dict must not have 'GANETI_' prefixed as this will
    be handled in the hooks runner. Also note additional keys will be
    added by the hooks runner. If the LU doesn't define any
    environment, an empty dict (and not None) should be returned.

    No nodes should be returned as an empty list (and not None).

    Note that if the HPATH for a LU class is None, this function will
    not be called.

    """
    raise NotImplementedError

  def HooksCallBack(self, phase, hook_results, feedback_fn, lu_result):
    """Notify the LU about the results of its hooks.

    This method is called every time a hooks phase is executed, and notifies
    the Logical Unit about the hooks' result. The LU can then use it to alter
    its result based on the hooks.  By default the method does nothing and the
    previous result is passed back unchanged but any LU can define it if it
    wants to use the local cluster hook-scripts somehow.

    Args:
      phase: the hooks phase that has just been run
      hook_results: the results of the multi-node hooks rpc call
      feedback_fn: function to send feedback back to the caller
      lu_result: the previous result this LU had, or None in the PRE phase.

    """
    return lu_result

  def _ExpandAndLockInstance(self):
    """Helper function to expand and lock an instance.

    Many LUs that work on an instance take its name in self.op.instance_name
    and need to expand it and then declare the expanded name for locking. This
    function does it, and then updates self.op.instance_name to the expanded
    name. It also initializes needed_locks as a dict, if this hasn't been done
    before.

    Raises errors.OpPrereqError if the instance name cannot be expanded.

    """
    if self.needed_locks is None:
      self.needed_locks = {}
    else:
      assert locking.LEVEL_INSTANCE not in self.needed_locks, \
        "_ExpandAndLockInstance called with instance-level locks set"
    expanded_name = self.cfg.ExpandInstanceName(self.op.instance_name)
    if expanded_name is None:
      raise errors.OpPrereqError("Instance '%s' not known" %
                                  self.op.instance_name)
    self.needed_locks[locking.LEVEL_INSTANCE] = expanded_name
    self.op.instance_name = expanded_name

  def _LockInstancesNodes(self, primary_only=False):
    """Helper function to declare instances' nodes for locking.

    This function should be called after locking one or more instances to lock
    their nodes. Its effect is populating self.needed_locks[locking.LEVEL_NODE]
    with all primary or secondary nodes for instances already locked and
    present in self.needed_locks[locking.LEVEL_INSTANCE].

    It should be called from DeclareLocks, and for safety only works if
    self.recalculate_locks[locking.LEVEL_NODE] is set.

    In the future it may grow parameters to just lock some instance's nodes, or
    to just lock primaries or secondary nodes, if needed.

    It should be called in DeclareLocks in a way similar to:

    if level == locking.LEVEL_NODE:
      self._LockInstancesNodes()

    @type primary_only: boolean
    @param primary_only: only lock primary nodes of locked instances

    """
    assert locking.LEVEL_NODE in self.recalculate_locks, \
      "_LockInstancesNodes helper function called with no nodes to recalculate"

    # TODO: check if we're really been called with the instance locks held

    # For now we'll replace self.needed_locks[locking.LEVEL_NODE], but in the
    # future we might want to have different behaviors depending on the value
    # of self.recalculate_locks[locking.LEVEL_NODE]
    wanted_nodes = []
    for instance_name in self.acquired_locks[locking.LEVEL_INSTANCE]:
      instance = self.context.cfg.GetInstanceInfo(instance_name)
      wanted_nodes.append(instance.primary_node)
      if not primary_only:
        wanted_nodes.extend(instance.secondary_nodes)

    # LOCKS_REPLACE overwrites the node-level needs; LOCKS_APPEND adds to them
    if self.recalculate_locks[locking.LEVEL_NODE] == constants.LOCKS_REPLACE:
      self.needed_locks[locking.LEVEL_NODE] = wanted_nodes
    elif self.recalculate_locks[locking.LEVEL_NODE] == constants.LOCKS_APPEND:
      self.needed_locks[locking.LEVEL_NODE].extend(wanted_nodes)

    # mark the recalculation as consumed so a second call asserts
    del self.recalculate_locks[locking.LEVEL_NODE]
313 c4a2fee1 Guido Trotter
314 a8083063 Iustin Pop
315 a8083063 Iustin Pop
class NoHooksLU(LogicalUnit):
  """Simple LU which runs no hooks.

  This LU is intended as a parent for other LogicalUnits which will
  run no hooks, in order to reduce duplicate code.

  """
  # Clearing both hook attributes makes the processor skip the hook
  # phases entirely for subclasses of this LU
  HTYPE = None
  HPATH = None
324 a8083063 Iustin Pop
325 a8083063 Iustin Pop
326 6de7c41d Iustin Pop
class _FieldSet(object):
327 6de7c41d Iustin Pop
  """A simple field set.
328 6de7c41d Iustin Pop

329 6de7c41d Iustin Pop
  Among the features are:
330 6de7c41d Iustin Pop
    - checking if a string is among a list of static string or regex objects
331 6de7c41d Iustin Pop
    - checking if a whole list of string matches
332 6de7c41d Iustin Pop
    - returning the matching groups from a regex match
333 6de7c41d Iustin Pop

334 6de7c41d Iustin Pop
  Internally, all fields are held as regular expression objects.
335 6de7c41d Iustin Pop

336 6de7c41d Iustin Pop
  """
337 6de7c41d Iustin Pop
  def __init__(self, *items):
338 6de7c41d Iustin Pop
    self.items = [re.compile("^%s$" % value) for value in items]
339 6de7c41d Iustin Pop
340 6de7c41d Iustin Pop
  def Extend(self, other_set):
341 6de7c41d Iustin Pop
    """Extend the field set with the items from another one"""
342 6de7c41d Iustin Pop
    self.items.extend(other_set.items)
343 6de7c41d Iustin Pop
344 6de7c41d Iustin Pop
  def Matches(self, field):
345 6de7c41d Iustin Pop
    """Checks if a field matches the current set
346 6de7c41d Iustin Pop

347 6de7c41d Iustin Pop
    @type field: str
348 6de7c41d Iustin Pop
    @param field: the string to match
349 6de7c41d Iustin Pop
    @return: either False or a regular expression match object
350 6de7c41d Iustin Pop

351 6de7c41d Iustin Pop
    """
352 6de7c41d Iustin Pop
    for m in itertools.ifilter(None, (val.match(field) for val in self.items)):
353 6de7c41d Iustin Pop
      return m
354 6de7c41d Iustin Pop
    return False
355 6de7c41d Iustin Pop
356 6de7c41d Iustin Pop
  def NonMatching(self, items):
357 6de7c41d Iustin Pop
    """Returns the list of fields not matching the current set
358 6de7c41d Iustin Pop

359 6de7c41d Iustin Pop
    @type items: list
360 6de7c41d Iustin Pop
    @param items: the list of fields to check
361 6de7c41d Iustin Pop
    @rtype: list
362 6de7c41d Iustin Pop
    @return: list of non-matching fields
363 6de7c41d Iustin Pop

364 6de7c41d Iustin Pop
    """
365 6de7c41d Iustin Pop
    return [val for val in items if not self.Matches(val)]
366 6de7c41d Iustin Pop
367 6de7c41d Iustin Pop
368 dcb93971 Michael Hanselmann
def _GetWantedNodes(lu, nodes):
  """Returns list of checked and expanded node names.

  Args:
    nodes: non-empty list of short or full node names

  Raises errors.OpPrereqError if the argument is not a list or a name
  cannot be expanded, errors.ProgrammerError if the list is empty.

  """
  if not isinstance(nodes, list):
    raise errors.OpPrereqError("Invalid argument type 'nodes'")

  if not nodes:
    raise errors.ProgrammerError("_GetWantedNodes should only be called with a"
      " non-empty list of nodes whose name is to be expanded.")

  def _Expand(name):
    # canonicalize one name, failing loudly on unknown nodes
    full_name = lu.cfg.ExpandNodeName(name)
    if full_name is None:
      raise errors.OpPrereqError("No such node name '%s'" % name)
    return full_name

  return utils.NiceSort([_Expand(name) for name in nodes])
390 3312b702 Iustin Pop
391 3312b702 Iustin Pop
392 3312b702 Iustin Pop
def _GetWantedInstances(lu, instances):
  """Returns list of checked and expanded instance names.

  Args:
    instances: list of short or full instance names; an empty list
      selects all instances in the cluster

  Raises errors.OpPrereqError if the argument is not a list or a name
  cannot be expanded.

  """
  if not isinstance(instances, list):
    raise errors.OpPrereqError("Invalid argument type 'instances'")

  def _Expand(name):
    # canonicalize one name, failing loudly on unknown instances
    full_name = lu.cfg.ExpandInstanceName(name)
    if full_name is None:
      raise errors.OpPrereqError("No such instance name '%s'" % name)
    return full_name

  if instances:
    wanted = [_Expand(name) for name in instances]
  else:
    # empty selection means "all instances"
    wanted = lu.cfg.GetInstanceList()
  return utils.NiceSort(wanted)
414 dcb93971 Michael Hanselmann
415 dcb93971 Michael Hanselmann
416 dcb93971 Michael Hanselmann
def _CheckOutputFields(static, dynamic, selected):
  """Checks whether all selected fields are valid.

  @type static: L{_FieldSet}
  @param static: static fields set
  @type dynamic: L{_FieldSet}
  @param dynamic: dynamic fields set
  @type selected: list
  @param selected: the requested output field names

  Raises errors.OpPrereqError listing any fields found in neither set.

  """
  all_fields = _FieldSet()
  for field_set in (static, dynamic):
    all_fields.Extend(field_set)

  unknown = all_fields.NonMatching(selected)
  if unknown:
    raise errors.OpPrereqError("Unknown output fields selected: %s"
                               % ",".join(unknown))
433 dcb93971 Michael Hanselmann
434 dcb93971 Michael Hanselmann
435 ecb215b5 Michael Hanselmann
def _BuildInstanceHookEnv(name, primary_node, secondary_nodes, os_type, status,
436 396e1b78 Michael Hanselmann
                          memory, vcpus, nics):
437 ecb215b5 Michael Hanselmann
  """Builds instance related env variables for hooks from single variables.
438 ecb215b5 Michael Hanselmann

439 ecb215b5 Michael Hanselmann
  Args:
440 ecb215b5 Michael Hanselmann
    secondary_nodes: List of secondary nodes as strings
441 396e1b78 Michael Hanselmann
  """
442 396e1b78 Michael Hanselmann
  env = {
443 0e137c28 Iustin Pop
    "OP_TARGET": name,
444 396e1b78 Michael Hanselmann
    "INSTANCE_NAME": name,
445 396e1b78 Michael Hanselmann
    "INSTANCE_PRIMARY": primary_node,
446 396e1b78 Michael Hanselmann
    "INSTANCE_SECONDARIES": " ".join(secondary_nodes),
447 ecb215b5 Michael Hanselmann
    "INSTANCE_OS_TYPE": os_type,
448 396e1b78 Michael Hanselmann
    "INSTANCE_STATUS": status,
449 396e1b78 Michael Hanselmann
    "INSTANCE_MEMORY": memory,
450 396e1b78 Michael Hanselmann
    "INSTANCE_VCPUS": vcpus,
451 396e1b78 Michael Hanselmann
  }
452 396e1b78 Michael Hanselmann
453 396e1b78 Michael Hanselmann
  if nics:
454 396e1b78 Michael Hanselmann
    nic_count = len(nics)
455 53e4e875 Guido Trotter
    for idx, (ip, bridge, mac) in enumerate(nics):
456 396e1b78 Michael Hanselmann
      if ip is None:
457 396e1b78 Michael Hanselmann
        ip = ""
458 396e1b78 Michael Hanselmann
      env["INSTANCE_NIC%d_IP" % idx] = ip
459 396e1b78 Michael Hanselmann
      env["INSTANCE_NIC%d_BRIDGE" % idx] = bridge
460 53e4e875 Guido Trotter
      env["INSTANCE_NIC%d_HWADDR" % idx] = mac
461 396e1b78 Michael Hanselmann
  else:
462 396e1b78 Michael Hanselmann
    nic_count = 0
463 396e1b78 Michael Hanselmann
464 396e1b78 Michael Hanselmann
  env["INSTANCE_NIC_COUNT"] = nic_count
465 396e1b78 Michael Hanselmann
466 396e1b78 Michael Hanselmann
  return env
467 396e1b78 Michael Hanselmann
468 396e1b78 Michael Hanselmann
469 338e51e8 Iustin Pop
def _BuildInstanceHookEnvByObject(lu, instance, override=None):
  """Builds instance related env variables for hooks from an object.

  Args:
    lu: the calling logical unit (used to read the cluster's backend
      parameter defaults for this instance)
    instance: objects.Instance object of instance
    override: dict of values to override
  """
  bep = lu.cfg.GetClusterInfo().FillBE(instance)
  args = {
    'name': instance.name,
    'primary_node': instance.primary_node,
    'secondary_nodes': instance.secondary_nodes,
    'os_type': instance.os,
    # BUGFIX: the status variable was being filled with instance.os,
    # duplicating INSTANCE_OS_TYPE; pass the instance's run status instead
    'status': instance.status,
    'memory': bep[constants.BE_MEMORY],
    'vcpus': bep[constants.BE_VCPUS],
    'nics': [(nic.ip, nic.bridge, nic.mac) for nic in instance.nics],
  }
  if override:
    args.update(override)
  return _BuildInstanceHookEnv(**args)
490 396e1b78 Michael Hanselmann
491 396e1b78 Michael Hanselmann
492 b9bddb6b Iustin Pop
def _CheckInstanceBridgesExist(lu, instance):
  """Check that the bridges needed by an instance exist.

  Raises errors.OpPrereqError if any bridge used by the instance's NICs
  is missing on its primary node.

  """
  # check bridges existence via an RPC to the primary node
  brlist = [nic.bridge for nic in instance.nics]
  target_node = instance.primary_node
  if not lu.rpc.call_bridges_exist(target_node, brlist):
    raise errors.OpPrereqError("one or more target bridges %s does not"
                               " exist on destination node '%s'" %
                               (brlist, target_node))
502 bf6929a2 Alexander Schreiber
503 bf6929a2 Alexander Schreiber
504 a8083063 Iustin Pop
class LUDestroyCluster(NoHooksLU):
  """Logical unit for destroying the cluster.

  """
  _OP_REQP = []

  def CheckPrereq(self):
    """Check prerequisites.

    This checks whether the cluster is empty.

    Any errors are signalled by raising errors.OpPrereqError.

    """
    master = self.cfg.GetMasterNode()

    # the only remaining node must be the master itself
    nodelist = self.cfg.GetNodeList()
    if nodelist != [master]:
      raise errors.OpPrereqError("There are still %d node(s) in"
                                 " this cluster." % (len(nodelist) - 1))
    remaining_instances = self.cfg.GetInstanceList()
    if remaining_instances:
      raise errors.OpPrereqError("There are still %d instance(s) in"
                                 " this cluster." % len(remaining_instances))

  def Exec(self, feedback_fn):
    """Destroys the cluster.

    Stops the master role on the master node, backs up the root SSH key
    pair and returns the master node name for further processing.

    """
    master = self.cfg.GetMasterNode()
    if not self.rpc.call_node_stop_master(master, False):
      raise errors.OpExecError("Could not disable the master role")
    priv_key, pub_key, _ = ssh.GetUserFiles(constants.GANETI_RUNAS)
    # preserve the keys before the cluster configuration goes away
    for key_file in (priv_key, pub_key):
      utils.CreateBackup(key_file)
    return master
540 a8083063 Iustin Pop
541 a8083063 Iustin Pop
542 d8fff41c Guido Trotter
class LUVerifyCluster(LogicalUnit):
  """Verifies the cluster status.

  """
  HPATH = "cluster-verify"
  HTYPE = constants.HTYPE_CLUSTER
  # the opcode must carry the (possibly empty) list of checks to skip
  _OP_REQP = ["skip_checks"]
  # locking is declared in ExpandNames; no big ganeti lock required
  REQ_BGL = False
550 d4b9d97f Guido Trotter
551 d4b9d97f Guido Trotter
  def ExpandNames(self):
    """Declare shared locks on all nodes and all instances."""
    self.needed_locks = {
      locking.LEVEL_NODE: locking.ALL_SET,
      locking.LEVEL_INSTANCE: locking.ALL_SET,
    }
    # verification only reads state, so every lock is taken shared
    self.share_locks = dict.fromkeys(locking.LEVELS, 1)
557 a8083063 Iustin Pop
558 a8083063 Iustin Pop
  def _VerifyNode(self, node, file_list, local_cksum, vglist, node_result,
                  remote_version, feedback_fn):
    """Run multiple tests against a node.

    Test list:
      - compares ganeti version
      - checks vg existence and size > 20G
      - checks config file checksum
      - checks ssh to other nodes

    Args:
      node: name of the node to check
      file_list: required list of files
      local_cksum: dictionary of local files and their checksums
      vglist: volume group data as returned by the vg_list RPC
      node_result: results of the node_verify RPC for this node
      remote_version: protocol version reported by the node
      feedback_fn: function used to report errors to the user

    Returns True if any problem was found, False otherwise.

    """
    # compares ganeti version
    local_version = constants.PROTOCOL_VERSION
    if not remote_version:
      feedback_fn("  - ERROR: connection to %s failed" % (node))
      return True

    if local_version != remote_version:
      feedback_fn("  - ERROR: sw version mismatch: master %s, node(%s) %s" %
                      (local_version, node, remote_version))
      return True

    # checks vg existence and size > 20G

    bad = False
    if not vglist:
      feedback_fn("  - ERROR: unable to check volume groups on node %s." %
                      (node,))
      bad = True
    else:
      vgstatus = utils.CheckVolumeGroupSize(vglist, self.cfg.GetVGName(),
                                            constants.MIN_VG_SIZE)
      if vgstatus:
        feedback_fn("  - ERROR: %s on node %s" % (vgstatus, node))
        bad = True

    if not node_result:
      feedback_fn("  - ERROR: unable to verify node %s." % (node,))
      return True

    # checks config file checksum
    # checks ssh to any

    if 'filelist' not in node_result:
      bad = True
      feedback_fn("  - ERROR: node hasn't returned file checksum data")
    else:
      remote_cksum = node_result['filelist']
      for file_name in file_list:
        if file_name not in remote_cksum:
          bad = True
          feedback_fn("  - ERROR: file '%s' missing" % file_name)
        elif remote_cksum[file_name] != local_cksum[file_name]:
          bad = True
          feedback_fn("  - ERROR: file '%s' has wrong checksum" % file_name)

    if 'nodelist' not in node_result:
      bad = True
      feedback_fn("  - ERROR: node hasn't returned node ssh connectivity data")
    else:
      if node_result['nodelist']:
        bad = True
        # use a distinct loop variable: rebinding 'node' here would shadow
        # the 'node' parameter used by the checks below
        for failed_node in node_result['nodelist']:
          feedback_fn("  - ERROR: ssh communication with node '%s': %s" %
                          (failed_node, node_result['nodelist'][failed_node]))

    if 'node-net-test' not in node_result:
      bad = True
      feedback_fn("  - ERROR: node hasn't returned node tcp connectivity data")
    else:
      if node_result['node-net-test']:
        bad = True
        nlist = utils.NiceSort(node_result['node-net-test'].keys())
        for failed_node in nlist:
          feedback_fn("  - ERROR: tcp communication with node '%s': %s" %
                          (failed_node,
                           node_result['node-net-test'][failed_node]))

    # per-hypervisor verification results (a dict of hv_name -> error or None)
    hyp_result = node_result.get('hypervisor', None)
    if isinstance(hyp_result, dict):
      for hv_name, hv_result in hyp_result.iteritems():
        if hv_result is not None:
          feedback_fn("  - ERROR: hypervisor %s verify failure: '%s'" %
                      (hv_name, hv_result))
    return bad
646 a8083063 Iustin Pop
647 c5705f58 Guido Trotter
  def _VerifyInstance(self, instance, instanceconfig, node_vol_is,
648 c5705f58 Guido Trotter
                      node_instance, feedback_fn):
649 a8083063 Iustin Pop
    """Verify an instance.
650 a8083063 Iustin Pop

651 a8083063 Iustin Pop
    This function checks to see if the required block devices are
652 a8083063 Iustin Pop
    available on the instance's node.
653 a8083063 Iustin Pop

654 a8083063 Iustin Pop
    """
655 a8083063 Iustin Pop
    bad = False
656 a8083063 Iustin Pop
657 a8083063 Iustin Pop
    node_current = instanceconfig.primary_node
658 a8083063 Iustin Pop
659 a8083063 Iustin Pop
    node_vol_should = {}
660 a8083063 Iustin Pop
    instanceconfig.MapLVsByNode(node_vol_should)
661 a8083063 Iustin Pop
662 a8083063 Iustin Pop
    for node in node_vol_should:
663 a8083063 Iustin Pop
      for volume in node_vol_should[node]:
664 a8083063 Iustin Pop
        if node not in node_vol_is or volume not in node_vol_is[node]:
665 a8083063 Iustin Pop
          feedback_fn("  - ERROR: volume %s missing on node %s" %
666 a8083063 Iustin Pop
                          (volume, node))
667 a8083063 Iustin Pop
          bad = True
668 a8083063 Iustin Pop
669 a8083063 Iustin Pop
    if not instanceconfig.status == 'down':
670 a872dae6 Guido Trotter
      if (node_current not in node_instance or
671 a872dae6 Guido Trotter
          not instance in node_instance[node_current]):
672 a8083063 Iustin Pop
        feedback_fn("  - ERROR: instance %s not running on node %s" %
673 a8083063 Iustin Pop
                        (instance, node_current))
674 a8083063 Iustin Pop
        bad = True
675 a8083063 Iustin Pop
676 a8083063 Iustin Pop
    for node in node_instance:
677 a8083063 Iustin Pop
      if (not node == node_current):
678 a8083063 Iustin Pop
        if instance in node_instance[node]:
679 a8083063 Iustin Pop
          feedback_fn("  - ERROR: instance %s should not run on node %s" %
680 a8083063 Iustin Pop
                          (instance, node))
681 a8083063 Iustin Pop
          bad = True
682 a8083063 Iustin Pop
683 6a438c98 Michael Hanselmann
    return bad
684 a8083063 Iustin Pop
685 a8083063 Iustin Pop
  def _VerifyOrphanVolumes(self, node_vol_should, node_vol_is, feedback_fn):
686 a8083063 Iustin Pop
    """Verify if there are any unknown volumes in the cluster.
687 a8083063 Iustin Pop

688 a8083063 Iustin Pop
    The .os, .swap and backup volumes are ignored. All other volumes are
689 a8083063 Iustin Pop
    reported as unknown.
690 a8083063 Iustin Pop

691 a8083063 Iustin Pop
    """
692 a8083063 Iustin Pop
    bad = False
693 a8083063 Iustin Pop
694 a8083063 Iustin Pop
    for node in node_vol_is:
695 a8083063 Iustin Pop
      for volume in node_vol_is[node]:
696 a8083063 Iustin Pop
        if node not in node_vol_should or volume not in node_vol_should[node]:
697 a8083063 Iustin Pop
          feedback_fn("  - ERROR: volume %s on node %s should not exist" %
698 a8083063 Iustin Pop
                      (volume, node))
699 a8083063 Iustin Pop
          bad = True
700 a8083063 Iustin Pop
    return bad
701 a8083063 Iustin Pop
702 a8083063 Iustin Pop
  def _VerifyOrphanInstances(self, instancelist, node_instance, feedback_fn):
703 a8083063 Iustin Pop
    """Verify the list of running instances.
704 a8083063 Iustin Pop

705 a8083063 Iustin Pop
    This checks what instances are running but unknown to the cluster.
706 a8083063 Iustin Pop

707 a8083063 Iustin Pop
    """
708 a8083063 Iustin Pop
    bad = False
709 a8083063 Iustin Pop
    for node in node_instance:
710 a8083063 Iustin Pop
      for runninginstance in node_instance[node]:
711 a8083063 Iustin Pop
        if runninginstance not in instancelist:
712 a8083063 Iustin Pop
          feedback_fn("  - ERROR: instance %s on node %s should not exist" %
713 a8083063 Iustin Pop
                          (runninginstance, node))
714 a8083063 Iustin Pop
          bad = True
715 a8083063 Iustin Pop
    return bad
716 a8083063 Iustin Pop
717 2b3b6ddd Guido Trotter
  def _VerifyNPlusOneMemory(self, node_info, instance_cfg, feedback_fn):
    """Verify N+1 Memory Resilience.

    Check that if one single node dies we can still start all the instances it
    was primary for.

    Only auto-balanced instances are counted towards the needed memory.
    Returns True when some node would not have enough free memory to
    host the failed-over instances.

    """
    bad = False

    for node, nodeinfo in node_info.iteritems():
      # This code checks that every node which is now listed as secondary has
      # enough memory to host all instances it is supposed to should a single
      # other node in the cluster fail.
      # FIXME: not ready for failover to an arbitrary node
      # FIXME: does not support file-backed instances
      # WARNING: we currently take into account down instances as well as up
      # ones, considering that even if they're down someone might want to start
      # them even in the event of a node failure.
      for prinode, instances in nodeinfo['sinst-by-pnode'].iteritems():
        needed_mem = 0
        for instance in instances:
          bep = self.cfg.GetClusterInfo().FillBE(instance_cfg[instance])
          if bep[constants.BE_AUTO_BALANCE]:
            needed_mem += bep[constants.BE_MEMORY]
        if nodeinfo['mfree'] < needed_mem:
          # "accommodate" typo in the user-visible message fixed
          feedback_fn("  - ERROR: not enough memory on node %s to accommodate"
                      " failovers should node %s fail" % (node, prinode))
          bad = True
    return bad
746 2b3b6ddd Guido Trotter
747 a8083063 Iustin Pop
  def CheckPrereq(self):
    """Check prerequisites.

    Turn the list of checks to skip into a frozenset and make sure every
    entry is a known optional check.

    """
    skip = frozenset(self.op.skip_checks)
    self.skip_set = skip
    if not skip.issubset(constants.VERIFY_OPTIONAL_CHECKS):
      raise errors.OpPrereqError("Invalid checks to be skipped specified")
757 a8083063 Iustin Pop
758 d8fff41c Guido Trotter
  def BuildHooksEnv(self):
    """Build hooks env.

    Cluster-Verify hooks are only run in the post phase; their failure
    makes the output be logged in the verify output and the
    verification fail.

    """
    # TODO: populate the environment with useful information for verify hooks
    env = {}
    return env, [], self.cfg.GetNodeList()
769 d8fff41c Guido Trotter
770 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
    """Verify integrity of cluster, performing various test on nodes.

    Returns True when the whole verification succeeded (no errors were
    found), False otherwise.

    """
    bad = False
    feedback_fn("* Verifying global settings")
    for msg in self.cfg.VerifyConfig():
      feedback_fn("  - ERROR: %s" % msg)

    vg_name = self.cfg.GetVGName()
    hypervisors = self.cfg.GetClusterInfo().enabled_hypervisors
    nodelist = utils.NiceSort(self.cfg.GetNodeList())
    nodeinfo = [self.cfg.GetNodeInfo(nname) for nname in nodelist]
    instancelist = utils.NiceSort(self.cfg.GetInstanceList())
    i_non_redundant = [] # Non redundant instances
    i_non_a_balanced = [] # Non auto-balanced instances
    node_volume = {}
    node_instance = {}
    node_info = {}
    instance_cfg = {}

    # FIXME: verify OS list
    # do local checksums
    file_names = []
    file_names.append(constants.SSL_CERT_FILE)
    file_names.append(constants.CLUSTER_CONF_FILE)
    local_checksums = utils.FingerprintFiles(file_names)

    # gather per-node data through a series of multi-node RPC calls; each
    # all_* result is a dict keyed by node name
    feedback_fn("* Gathering data (%d nodes)" % len(nodelist))
    all_volumeinfo = self.rpc.call_volume_list(nodelist, vg_name)
    all_instanceinfo = self.rpc.call_instance_list(nodelist, hypervisors)
    all_vglist = self.rpc.call_vg_list(nodelist)
    node_verify_param = {
      'filelist': file_names,
      'nodelist': nodelist,
      'hypervisor': hypervisors,
      'node-net-test': [(node.name, node.primary_ip, node.secondary_ip)
                        for node in nodeinfo]
      }
    all_nvinfo = self.rpc.call_node_verify(nodelist, node_verify_param,
                                           self.cfg.GetClusterName())
    all_rversion = self.rpc.call_version(nodelist)
    all_ninfo = self.rpc.call_node_info(nodelist, self.cfg.GetVGName(),
                                        self.cfg.GetHypervisorType())

    cluster = self.cfg.GetClusterInfo()
    # per-node pass: version/vg/file/ssh checks plus collection of the
    # node_volume/node_instance/node_info maps used by the instance pass
    for node in nodelist:
      feedback_fn("* Verifying node %s" % node)
      result = self._VerifyNode(node, file_names, local_checksums,
                                all_vglist[node], all_nvinfo[node],
                                all_rversion[node], feedback_fn)
      bad = bad or result

      # node_volume
      volumeinfo = all_volumeinfo[node]

      if isinstance(volumeinfo, basestring):
        # a string result means an LVM error message from the node
        feedback_fn("  - ERROR: LVM problem on node %s: %s" %
                    (node, volumeinfo[-400:].encode('string_escape')))
        bad = True
        node_volume[node] = {}
      elif not isinstance(volumeinfo, dict):
        feedback_fn("  - ERROR: connection to %s failed" % (node,))
        bad = True
        continue
      else:
        node_volume[node] = volumeinfo

      # node_instance
      nodeinstance = all_instanceinfo[node]
      if type(nodeinstance) != list:
        feedback_fn("  - ERROR: connection to %s failed" % (node,))
        bad = True
        continue

      node_instance[node] = nodeinstance

      # node_info
      nodeinfo = all_ninfo[node]
      if not isinstance(nodeinfo, dict):
        feedback_fn("  - ERROR: connection to %s failed" % (node,))
        bad = True
        continue

      try:
        node_info[node] = {
          "mfree": int(nodeinfo['memory_free']),
          "dfree": int(nodeinfo['vg_free']),
          "pinst": [],
          "sinst": [],
          # dictionary holding all instances this node is secondary for,
          # grouped by their primary node. Each key is a cluster node, and each
          # value is a list of instances which have the key as primary and the
          # current node as secondary.  this is handy to calculate N+1 memory
          # availability if you can only failover from a primary to its
          # secondary.
          "sinst-by-pnode": {},
        }
      except ValueError:
        feedback_fn("  - ERROR: invalid value returned from node %s" % (node,))
        bad = True
        continue

    node_vol_should = {}

    # per-instance pass: volume/placement checks plus filling of the
    # pinst/sinst/sinst-by-pnode bookkeeping used for N+1 verification
    for instance in instancelist:
      feedback_fn("* Verifying instance %s" % instance)
      inst_config = self.cfg.GetInstanceInfo(instance)
      result =  self._VerifyInstance(instance, inst_config, node_volume,
                                     node_instance, feedback_fn)
      bad = bad or result

      inst_config.MapLVsByNode(node_vol_should)

      instance_cfg[instance] = inst_config

      pnode = inst_config.primary_node
      if pnode in node_info:
        node_info[pnode]['pinst'].append(instance)
      else:
        feedback_fn("  - ERROR: instance %s, connection to primary node"
                    " %s failed" % (instance, pnode))
        bad = True

      # If the instance is non-redundant we cannot survive losing its primary
      # node, so we are not N+1 compliant. On the other hand we have no disk
      # templates with more than one secondary so that situation is not well
      # supported either.
      # FIXME: does not support file-backed instances
      if len(inst_config.secondary_nodes) == 0:
        i_non_redundant.append(instance)
      elif len(inst_config.secondary_nodes) > 1:
        feedback_fn("  - WARNING: multiple secondaries for instance %s"
                    % instance)

      if not cluster.FillBE(inst_config)[constants.BE_AUTO_BALANCE]:
        i_non_a_balanced.append(instance)

      for snode in inst_config.secondary_nodes:
        if snode in node_info:
          node_info[snode]['sinst'].append(instance)
          if pnode not in node_info[snode]['sinst-by-pnode']:
            node_info[snode]['sinst-by-pnode'][pnode] = []
          node_info[snode]['sinst-by-pnode'][pnode].append(instance)
        else:
          feedback_fn("  - ERROR: instance %s, connection to secondary node"
                      " %s failed" % (instance, snode))

    feedback_fn("* Verifying orphan volumes")
    result = self._VerifyOrphanVolumes(node_vol_should, node_volume,
                                       feedback_fn)
    bad = bad or result

    feedback_fn("* Verifying remaining instances")
    result = self._VerifyOrphanInstances(instancelist, node_instance,
                                         feedback_fn)
    bad = bad or result

    if constants.VERIFY_NPLUSONE_MEM not in self.skip_set:
      feedback_fn("* Verifying N+1 Memory redundancy")
      result = self._VerifyNPlusOneMemory(node_info, instance_cfg, feedback_fn)
      bad = bad or result

    feedback_fn("* Other Notes")
    if i_non_redundant:
      feedback_fn("  - NOTICE: %d non-redundant instance(s) found."
                  % len(i_non_redundant))

    if i_non_a_balanced:
      feedback_fn("  - NOTICE: %d non-auto-balanced instance(s) found."
                  % len(i_non_a_balanced))

    return not bad
943 a8083063 Iustin Pop
944 d8fff41c Guido Trotter
  def HooksCallBack(self, phase, hooks_results, feedback_fn, lu_result):
    """Analyze the post-hooks' result, handle it, and send some
    nicely-formatted feedback back to the user.

    Args:
      phase: the hooks phase that has just been run
      hooks_results: the results of the multi-node hooks rpc call
      feedback_fn: function to send feedback back to the caller
      lu_result: previous Exec result

    """
    # We only really run POST phase hooks, and are only interested in
    # their results
    if phase == constants.HOOKS_PHASE_POST:
      # Used to change hooks' output to proper indentation
      indent_re = re.compile('^', re.M)
      feedback_fn("* Hooks Results")
      if not hooks_results:
        feedback_fn("  - ERROR: general communication failure")
        lu_result = 1
      else:
        for node_name in hooks_results:
          show_node_header = True
          res = hooks_results[node_name]
          # a non-list result means the RPC to this node failed
          if res is False or not isinstance(res, list):
            feedback_fn("    Communication failure")
            lu_result = 1
            continue
          for script, hkr, output in res:
            if hkr == constants.HKR_FAIL:
              # The node header is only shown once, if there are
              # failing hooks on that node
              if show_node_header:
                feedback_fn("  Node %s:" % node_name)
                show_node_header = False
              feedback_fn("    ERROR: Script %s failed, output:" % script)
              output = indent_re.sub('      ', output)
              feedback_fn("%s" % output)
              lu_result = 1

      return lu_result
985 d8fff41c Guido Trotter
986 a8083063 Iustin Pop
987 2c95a8d4 Iustin Pop
class LUVerifyDisks(NoHooksLU):
  """Verifies the cluster disks status.

  """
  # no opcode parameters required
  _OP_REQP = []
  # locking is declared in ExpandNames; no big ganeti lock required
  REQ_BGL = False
993 d4b9d97f Guido Trotter
994 d4b9d97f Guido Trotter
  def ExpandNames(self):
    """Declare shared locks on every node and every instance."""
    self.needed_locks = {
      locking.LEVEL_NODE: locking.ALL_SET,
      locking.LEVEL_INSTANCE: locking.ALL_SET,
    }
    # read-only operation: all locks acquired in shared mode
    self.share_locks = dict.fromkeys(locking.LEVELS, 1)
1000 2c95a8d4 Iustin Pop
1001 2c95a8d4 Iustin Pop
  def CheckPrereq(self):
    """Check prerequisites.

    This LU has no prerequisites, so this is deliberately a no-op.

    """
1008 2c95a8d4 Iustin Pop
1009 2c95a8d4 Iustin Pop
  def Exec(self, feedback_fn):
    """Verify integrity of cluster disks.

    Returns a 4-element result (also unpacked into the four containers
    below, which are filled in place):
      - list of nodes that could not be contacted
      - dict of nodes with LVM enumeration errors (node -> message)
      - list of instances with at least one offline LV
      - dict of instances with missing LVs (name -> [(node, vol), ...])

    """
    # 'result' aliases the four containers, which are mutated below
    result = res_nodes, res_nlvm, res_instances, res_missing = [], {}, [], {}

    vg_name = self.cfg.GetVGName()
    nodes = utils.NiceSort(self.cfg.GetNodeList())
    instances = [self.cfg.GetInstanceInfo(name)
                 for name in self.cfg.GetInstanceList()]

    # map (node, vol) -> owning instance, for all LVs that should exist
    # for running, network-mirrored instances
    nv_dict = {}
    for inst in instances:
      inst_lvs = {}
      if (inst.status != "up" or
          inst.disk_template not in constants.DTS_NET_MIRROR):
        continue
      inst.MapLVsByNode(inst_lvs)
      # transform { iname: {node: [vol,],},} to {(node, vol): iname}
      for node, vol_list in inst_lvs.iteritems():
        for vol in vol_list:
          nv_dict[(node, vol)] = inst

    if not nv_dict:
      return result

    node_lvs = self.rpc.call_volume_list(nodes, vg_name)

    for node in nodes:
      # node_volume
      lvs = node_lvs[node]

      if isinstance(lvs, basestring):
        logging.warning("Error enumerating LVs on node %s: %s", node, lvs)
        res_nlvm[node] = lvs
        # the error string is not iterable; without this 'continue' the
        # per-LV loop below would crash with an AttributeError
        continue
      elif not isinstance(lvs, dict):
        logging.warning("Connection to node %s failed or invalid data"
                        " returned", node)
        res_nodes.append(node)
        continue

      for lv_name, (_, _, lv_online) in lvs.iteritems():
        inst = nv_dict.pop((node, lv_name), None)
        if (not lv_online and inst is not None
            and inst.name not in res_instances):
          res_instances.append(inst.name)

    # any leftover items in nv_dict are missing LVs, let's arrange the
    # data better
    for key, inst in nv_dict.iteritems():
      res_missing.setdefault(inst.name, []).append(key)

    return result
1065 2c95a8d4 Iustin Pop
1066 2c95a8d4 Iustin Pop
1067 07bd8a51 Iustin Pop
class LURenameCluster(LogicalUnit):
  """Rename the cluster.

  Stops the master role, updates the stored cluster name/IP and
  redistributes the changed configuration files, then re-enables the
  master role even if the intermediate steps failed.

  """
  HPATH = "cluster-rename"
  HTYPE = constants.HTYPE_CLUSTER
  # the opcode must carry the new cluster name
  _OP_REQP = ["name"]

  def BuildHooksEnv(self):
    """Build hooks env.

    Hooks run on the master node only, with the old name as OP_TARGET
    and the requested name as NEW_NAME.

    """
    env = {
      "OP_TARGET": self.cfg.GetClusterName(),
      "NEW_NAME": self.op.name,
      }
    mn = self.cfg.GetMasterNode()
    return env, [mn], [mn]

  def CheckPrereq(self):
    """Verify that the passed name is a valid one.

    Rejects a rename that changes neither the name nor the IP, and a
    new IP that is already reachable on the network.

    """
    # presumably resolves the new name to a (name, ip) pair -- verify
    # against utils.HostInfo
    hostname = utils.HostInfo(self.op.name)

    new_name = hostname.name
    # keep the resolved IP for Exec
    self.ip = new_ip = hostname.ip
    old_name = self.cfg.GetClusterName()
    old_ip = self.cfg.GetMasterIP()
    if new_name == old_name and new_ip == old_ip:
      raise errors.OpPrereqError("Neither the name nor the IP address of the"
                                 " cluster has changed")
    if new_ip != old_ip:
      # a reachable new IP means it is already in use somewhere
      if utils.TcpPing(new_ip, constants.DEFAULT_NODED_PORT):
        raise errors.OpPrereqError("The given cluster IP address (%s) is"
                                   " reachable on the network. Aborting." %
                                   new_ip)

    # store the fully-resolved name back into the opcode
    self.op.name = new_name

  def Exec(self, feedback_fn):
    """Rename the cluster.

    """
    clustername = self.op.name
    ip = self.ip

    # shutdown the master IP
    master = self.cfg.GetMasterNode()
    if not self.rpc.call_node_stop_master(master, False):
      raise errors.OpExecError("Could not disable the master role")

    try:
      # modify the sstore
      # TODO: sstore
      # NOTE(review): 'ss' is not defined anywhere in view; these SetKey
      # calls will raise NameError at runtime until the ssconf rewrite
      # hinted at by the TODO above is completed -- confirm against the
      # rest of the module.
      ss.SetKey(ss.SS_MASTER_IP, ip)
      ss.SetKey(ss.SS_CLUSTER_NAME, clustername)

      # Distribute updated ss config to all nodes
      myself = self.cfg.GetNodeInfo(master)
      dist_nodes = self.cfg.GetNodeList()
      # the master already has the new data, skip it
      if myself.name in dist_nodes:
        dist_nodes.remove(myself.name)

      logging.debug("Copying updated ssconf data to all nodes")
      for keyname in [ss.SS_CLUSTER_NAME, ss.SS_MASTER_IP]:
        fname = ss.KeyToFilename(keyname)
        result = self.rpc.call_upload_file(dist_nodes, fname)
        for to_node in dist_nodes:
          # per-node failures only warn; the rename itself proceeds
          if not result[to_node]:
            self.LogWarning("Copy of file %s to node %s failed",
                            fname, to_node)
    finally:
      # always try to restore the master role, even on failure above
      if not self.rpc.call_node_start_master(master, False):
        self.LogWarning("Could not re-enable the master role on"
                        " the master, please restart manually.")
1143 07bd8a51 Iustin Pop
1144 07bd8a51 Iustin Pop
1145 8084f9f6 Manuel Franceschini
def _RecursiveCheckIfLVMBased(disk):
  """Check whether a disk or any of its children is LVM-based.

  Args:
    disk: ganeti.objects.Disk object

  Returns:
    boolean indicating whether a LD_LV dev_type was found or not

  """
  # descend first: any LVM-based child makes the whole tree LVM-based
  for child in disk.children or []:
    if _RecursiveCheckIfLVMBased(child):
      return True
  # no child matched; the answer is decided by this disk itself
  return disk.dev_type == constants.LD_LV
1160 8084f9f6 Manuel Franceschini
1161 8084f9f6 Manuel Franceschini
1162 8084f9f6 Manuel Franceschini
class LUSetClusterParams(LogicalUnit):
  """Change the parameters of the cluster.

  Handles the volume group name, the per-cluster backend parameter
  defaults, the hypervisor parameter defaults and the list of enabled
  hypervisors.

  """
  HPATH = "cluster-modify"
  HTYPE = constants.HTYPE_CLUSTER
  # all opcode fields are optional here
  _OP_REQP = []
  REQ_BGL = False

  def ExpandNames(self):
    """Acquire a shared lock on all nodes.

    """
    # FIXME: in the future maybe other cluster params won't require checking on
    # all nodes to be modified.
    self.needed_locks = {
      locking.LEVEL_NODE: locking.ALL_SET,
    }
    self.share_locks[locking.LEVEL_NODE] = 1

  def BuildHooksEnv(self):
    """Build hooks env.

    Hooks run on the master node only.

    """
    env = {
      "OP_TARGET": self.cfg.GetClusterName(),
      "NEW_VG_NAME": self.op.vg_name,
      }
    mn = self.cfg.GetMasterNode()
    return env, [mn], [mn]

  def CheckPrereq(self):
    """Check prerequisites.

    This checks whether the given params don't conflict and
    if the given volume group is valid.

    """
    # FIXME: This only works because there is only one parameter that can be
    # changed or removed.
    # an explicitly-empty vg_name means "disable LVM storage"; this is
    # only allowed when no instance still uses LVM-backed disks
    if self.op.vg_name is not None and not self.op.vg_name:
      instances = self.cfg.GetAllInstancesInfo().values()
      for inst in instances:
        for disk in inst.disks:
          if _RecursiveCheckIfLVMBased(disk):
            raise errors.OpPrereqError("Cannot disable lvm storage while"
                                       " lvm-based instances exist")

    node_list = self.acquired_locks[locking.LEVEL_NODE]

    # if vg_name not None, checks given volume group on all nodes
    if self.op.vg_name:
      vglist = self.rpc.call_vg_list(node_list)
      for node in node_list:
        # NOTE(review): vglist[node] will raise KeyError if a node gave
        # no answer -- presumably the RPC layer always returns an entry
        # per node; verify against rpc.call_vg_list
        vgstatus = utils.CheckVolumeGroupSize(vglist[node], self.op.vg_name,
                                              constants.MIN_VG_SIZE)
        if vgstatus:
          raise errors.OpPrereqError("Error on node '%s': %s" %
                                     (node, vgstatus))

    self.cluster = cluster = self.cfg.GetClusterInfo()
    # beparams changes do not need validation (we can't validate?),
    # but we still process here
    if self.op.beparams:
      self.new_beparams = cluster.FillDict(
        cluster.beparams[constants.BEGR_DEFAULT], self.op.beparams)

    # hypervisor list/parameters
    # start from a copy of the current hvparams, then merge the changes
    self.new_hvparams = cluster.FillDict(cluster.hvparams, {})
    if self.op.hvparams:
      if not isinstance(self.op.hvparams, dict):
        raise errors.OpPrereqError("Invalid 'hvparams' parameter on input")
      for hv_name, hv_dict in self.op.hvparams.items():
        if hv_name not in self.new_hvparams:
          # brand-new hypervisor: take the dict as-is
          self.new_hvparams[hv_name] = hv_dict
        else:
          # known hypervisor: overlay the changed keys
          self.new_hvparams[hv_name].update(hv_dict)

    if self.op.enabled_hypervisors is not None:
      self.hv_list = self.op.enabled_hypervisors
    else:
      self.hv_list = cluster.enabled_hypervisors

    if self.op.hvparams or self.op.enabled_hypervisors is not None:
      # either the enabled list has changed, or the parameters have, validate
      for hv_name, hv_params in self.new_hvparams.items():
        if ((self.op.hvparams and hv_name in self.op.hvparams) or
            (self.op.enabled_hypervisors and
             hv_name in self.op.enabled_hypervisors)):
          # either this is a new hypervisor, or its parameters have changed
          hv_class = hypervisor.GetHypervisor(hv_name)
          hv_class.CheckParameterSyntax(hv_params)
          # also validate the parameters on the affected nodes
          _CheckHVParams(self, node_list, hv_name, hv_params)

  def Exec(self, feedback_fn):
    """Change the parameters of the cluster.

    """
    if self.op.vg_name is not None:
      if self.op.vg_name != self.cfg.GetVGName():
        self.cfg.SetVGName(self.op.vg_name)
      else:
        feedback_fn("Cluster LVM configuration already in desired"
                    " state, not changing")
    if self.op.hvparams:
      self.cluster.hvparams = self.new_hvparams
    if self.op.enabled_hypervisors is not None:
      self.cluster.enabled_hypervisors = self.op.enabled_hypervisors
    if self.op.beparams:
      self.cluster.beparams[constants.BEGR_DEFAULT] = self.new_beparams
    # persist the modified cluster object
    self.cfg.Update(self.cluster)
1270 8084f9f6 Manuel Franceschini
1271 8084f9f6 Manuel Franceschini
1272 b9bddb6b Iustin Pop
def _WaitForSync(lu, instance, oneshot=False, unlock=False):
1273 a8083063 Iustin Pop
  """Sleep and poll for an instance's disk to sync.
1274 a8083063 Iustin Pop

1275 a8083063 Iustin Pop
  """
1276 a8083063 Iustin Pop
  if not instance.disks:
1277 a8083063 Iustin Pop
    return True
1278 a8083063 Iustin Pop
1279 a8083063 Iustin Pop
  if not oneshot:
1280 b9bddb6b Iustin Pop
    lu.proc.LogInfo("Waiting for instance %s to sync disks." % instance.name)
1281 a8083063 Iustin Pop
1282 a8083063 Iustin Pop
  node = instance.primary_node
1283 a8083063 Iustin Pop
1284 a8083063 Iustin Pop
  for dev in instance.disks:
1285 b9bddb6b Iustin Pop
    lu.cfg.SetDiskID(dev, node)
1286 a8083063 Iustin Pop
1287 a8083063 Iustin Pop
  retries = 0
1288 a8083063 Iustin Pop
  while True:
1289 a8083063 Iustin Pop
    max_time = 0
1290 a8083063 Iustin Pop
    done = True
1291 a8083063 Iustin Pop
    cumul_degraded = False
1292 72737a7f Iustin Pop
    rstats = lu.rpc.call_blockdev_getmirrorstatus(node, instance.disks)
1293 a8083063 Iustin Pop
    if not rstats:
1294 86d9d3bb Iustin Pop
      lu.LogWarning("Can't get any data from node %s", node)
1295 a8083063 Iustin Pop
      retries += 1
1296 a8083063 Iustin Pop
      if retries >= 10:
1297 3ecf6786 Iustin Pop
        raise errors.RemoteError("Can't contact node %s for mirror data,"
1298 3ecf6786 Iustin Pop
                                 " aborting." % node)
1299 a8083063 Iustin Pop
      time.sleep(6)
1300 a8083063 Iustin Pop
      continue
1301 a8083063 Iustin Pop
    retries = 0
1302 a8083063 Iustin Pop
    for i in range(len(rstats)):
1303 a8083063 Iustin Pop
      mstat = rstats[i]
1304 a8083063 Iustin Pop
      if mstat is None:
1305 86d9d3bb Iustin Pop
        lu.LogWarning("Can't compute data for node %s/%s",
1306 86d9d3bb Iustin Pop
                           node, instance.disks[i].iv_name)
1307 a8083063 Iustin Pop
        continue
1308 0834c866 Iustin Pop
      # we ignore the ldisk parameter
1309 0834c866 Iustin Pop
      perc_done, est_time, is_degraded, _ = mstat
1310 a8083063 Iustin Pop
      cumul_degraded = cumul_degraded or (is_degraded and perc_done is None)
1311 a8083063 Iustin Pop
      if perc_done is not None:
1312 a8083063 Iustin Pop
        done = False
1313 a8083063 Iustin Pop
        if est_time is not None:
1314 a8083063 Iustin Pop
          rem_time = "%d estimated seconds remaining" % est_time
1315 a8083063 Iustin Pop
          max_time = est_time
1316 a8083063 Iustin Pop
        else:
1317 a8083063 Iustin Pop
          rem_time = "no time estimate"
1318 b9bddb6b Iustin Pop
        lu.proc.LogInfo("- device %s: %5.2f%% done, %s" %
1319 b9bddb6b Iustin Pop
                        (instance.disks[i].iv_name, perc_done, rem_time))
1320 a8083063 Iustin Pop
    if done or oneshot:
1321 a8083063 Iustin Pop
      break
1322 a8083063 Iustin Pop
1323 d4fa5c23 Iustin Pop
    time.sleep(min(60, max_time))
1324 a8083063 Iustin Pop
1325 a8083063 Iustin Pop
  if done:
1326 b9bddb6b Iustin Pop
    lu.proc.LogInfo("Instance %s's disks are in sync." % instance.name)
1327 a8083063 Iustin Pop
  return not cumul_degraded
1328 a8083063 Iustin Pop
1329 a8083063 Iustin Pop
1330 b9bddb6b Iustin Pop
def _CheckDiskConsistency(lu, dev, node, on_primary, ldisk=False):
1331 a8083063 Iustin Pop
  """Check that mirrors are not degraded.
1332 a8083063 Iustin Pop

1333 0834c866 Iustin Pop
  The ldisk parameter, if True, will change the test from the
1334 0834c866 Iustin Pop
  is_degraded attribute (which represents overall non-ok status for
1335 0834c866 Iustin Pop
  the device(s)) to the ldisk (representing the local storage status).
1336 0834c866 Iustin Pop

1337 a8083063 Iustin Pop
  """
1338 b9bddb6b Iustin Pop
  lu.cfg.SetDiskID(dev, node)
1339 0834c866 Iustin Pop
  if ldisk:
1340 0834c866 Iustin Pop
    idx = 6
1341 0834c866 Iustin Pop
  else:
1342 0834c866 Iustin Pop
    idx = 5
1343 a8083063 Iustin Pop
1344 a8083063 Iustin Pop
  result = True
1345 a8083063 Iustin Pop
  if on_primary or dev.AssembleOnSecondary():
1346 72737a7f Iustin Pop
    rstats = lu.rpc.call_blockdev_find(node, dev)
1347 a8083063 Iustin Pop
    if not rstats:
1348 9a4f63d1 Iustin Pop
      logging.warning("Node %s: disk degraded, not found or node down", node)
1349 a8083063 Iustin Pop
      result = False
1350 a8083063 Iustin Pop
    else:
1351 0834c866 Iustin Pop
      result = result and (not rstats[idx])
1352 a8083063 Iustin Pop
  if dev.children:
1353 a8083063 Iustin Pop
    for child in dev.children:
1354 b9bddb6b Iustin Pop
      result = result and _CheckDiskConsistency(lu, child, node, on_primary)
1355 a8083063 Iustin Pop
1356 a8083063 Iustin Pop
  return result
1357 a8083063 Iustin Pop
1358 a8083063 Iustin Pop
1359 a8083063 Iustin Pop
class LUDiagnoseOS(NoHooksLU):
  """Logical unit for OS diagnose/query.

  """
  _OP_REQP = ["output_fields", "names"]
  REQ_BGL = False
  _FIELDS_STATIC = _FieldSet()
  _FIELDS_DYNAMIC = _FieldSet("name", "valid", "node_status")

  def ExpandNames(self):
    # selecting individual OSes by name is not implemented
    if self.op.names:
      raise errors.OpPrereqError("Selective OS query not supported")

    _CheckOutputFields(static=self._FIELDS_STATIC,
                       dynamic=self._FIELDS_DYNAMIC,
                       selected=self.op.output_fields)

    # every node is queried, but shared locks suffice
    self.needed_locks = {}
    self.share_locks[locking.LEVEL_NODE] = 1
    self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET

  def CheckPrereq(self):
    """Check prerequisites.

    """

  @staticmethod
  def _DiagnoseByOS(node_list, rlist):
    """Remap a per-node result list into a per-OS, per-node dictionary.

      Args:
        node_list: a list with the names of all nodes
        rlist: a map with node names as keys and OS objects as values

      Returns:
        map: a map with osnames as keys and as value another map, with
             nodes as
             keys and list of OS objects as values
             e.g. {"debian-etch": {"node1": [<object>,...],
                                   "node2": [<object>,]}
                  }

    """
    all_os = {}
    for node_name, nr in rlist.iteritems():
      # skip nodes with no results at all
      if not nr:
        continue
      for os_obj in nr:
        if os_obj.name not in all_os:
          # first sighting of this OS: create an empty list per node
          all_os[os_obj.name] = dict((nname, []) for nname in node_list)
        all_os[os_obj.name][node_name].append(os_obj)
    return all_os

  def Exec(self, feedback_fn):
    """Compute the list of OSes.

    """
    node_list = self.acquired_locks[locking.LEVEL_NODE]
    node_data = self.rpc.call_os_diagnose(node_list)
    if node_data == False:
      raise errors.OpExecError("Can't gather the list of OSes")
    pol = self._DiagnoseByOS(node_list, node_data)
    output = []
    for os_name, os_data in pol.iteritems():
      row = []
      for field in self.op.output_fields:
        if field == "name":
          val = os_name
        elif field == "valid":
          # an OS counts as valid only if every node reported it valid
          val = utils.all([osl and osl[0] for osl in os_data.values()])
        elif field == "node_status":
          val = dict((node_name, [(v.status, v.path) for v in nos_list])
                     for node_name, nos_list in os_data.iteritems())
        else:
          raise errors.ParameterError(field)
        row.append(val)
      output.append(row)

    return output
1444 a8083063 Iustin Pop
1445 a8083063 Iustin Pop
1446 a8083063 Iustin Pop
class LURemoveNode(LogicalUnit):
  """Logical unit for removing a node.

  """
  HPATH = "node-remove"
  HTYPE = constants.HTYPE_NODE
  _OP_REQP = ["node_name"]

  def BuildHooksEnv(self):
    """Build hooks env.

    This doesn't run on the target node in the pre phase as a failed
    node would then be impossible to remove.

    """
    env = {
      "OP_TARGET": self.op.node_name,
      "NODE_NAME": self.op.node_name,
      }
    all_nodes = self.cfg.GetNodeList()
    # run the hooks everywhere except on the node being removed
    all_nodes.remove(self.op.node_name)
    return env, all_nodes, all_nodes

  def CheckPrereq(self):
    """Check prerequisites.

    This checks:
     - the node exists in the configuration
     - it does not have primary or secondary instances
     - it's not the master

    Any errors are signalled by raising errors.OpPrereqError.

    """
    node = self.cfg.GetNodeInfo(self.cfg.ExpandNodeName(self.op.node_name))
    if node is None:
      # FIX: use the call form of raise (the old "raise Class, arg" comma
      # form was inconsistent with the rest of this module and is invalid
      # in Python 3)
      raise errors.OpPrereqError("Node '%s' is unknown." % self.op.node_name)

    instance_list = self.cfg.GetInstanceList()

    masternode = self.cfg.GetMasterNode()
    if node.name == masternode:
      raise errors.OpPrereqError("Node is the master node,"
                                 " you need to failover first.")

    # refuse removal while any instance still uses this node
    for instance_name in instance_list:
      instance = self.cfg.GetInstanceInfo(instance_name)
      if node.name == instance.primary_node:
        raise errors.OpPrereqError("Instance %s still running on the node,"
                                   " please remove first." % instance_name)
      if node.name in instance.secondary_nodes:
        raise errors.OpPrereqError("Instance %s has node as a secondary,"
                                   " please remove first." % instance_name)
    # store the canonical name and the node object for Exec
    self.op.node_name = node.name
    self.node = node

  def Exec(self, feedback_fn):
    """Removes the node from the cluster.

    """
    node = self.node
    logging.info("Stopping the node daemon and removing configs from node %s",
                 node.name)

    # drop the node from the cluster context/configuration first
    self.context.RemoveNode(node.name)

    # then tell the node itself to leave the cluster
    self.rpc.call_node_leave_cluster(node.name)
1513 c8a0948f Michael Hanselmann
1514 a8083063 Iustin Pop
1515 a8083063 Iustin Pop
class LUQueryNodes(NoHooksLU):
  """Logical unit for querying nodes.

  Static fields are answered from the configuration alone; dynamic
  fields require locking the nodes and issuing a node_info RPC.

  """
  _OP_REQP = ["output_fields", "names"]
  REQ_BGL = False
  # fields that need a live RPC call to the nodes
  _FIELDS_DYNAMIC = _FieldSet(
    "dtotal", "dfree",
    "mtotal", "mnode", "mfree",
    "bootid",
    "ctotal",
    )

  # fields answered purely from the configuration
  _FIELDS_STATIC = _FieldSet(
    "name", "pinst_cnt", "sinst_cnt",
    "pinst_list", "sinst_list",
    "pip", "sip", "tags",
    "serial_no",
    )

  def ExpandNames(self):
    _CheckOutputFields(static=self._FIELDS_STATIC,
                       dynamic=self._FIELDS_DYNAMIC,
                       selected=self.op.output_fields)

    self.needed_locks = {}
    self.share_locks[locking.LEVEL_NODE] = 1

    if self.op.names:
      self.wanted = _GetWantedNodes(self, self.op.names)
    else:
      self.wanted = locking.ALL_SET

    # locking is only needed when at least one non-static field was asked for
    self.do_locking = self._FIELDS_STATIC.NonMatching(self.op.output_fields)
    if self.do_locking:
      # if we don't request only static fields, we need to lock the nodes
      self.needed_locks[locking.LEVEL_NODE] = self.wanted

  def CheckPrereq(self):
    """Check prerequisites.

    """
    # The validation of the node list is done in the _GetWantedNodes,
    # if non empty, and if empty, there's no validation to do
    pass

  def Exec(self, feedback_fn):
    """Computes the list of nodes and their attributes.

    """
    all_info = self.cfg.GetAllNodesInfo()
    # three cases: locked nodes, an explicit unlocked list (which may
    # have raced with removals), or simply all known nodes
    if self.do_locking:
      nodenames = self.acquired_locks[locking.LEVEL_NODE]
    elif self.wanted != locking.ALL_SET:
      nodenames = self.wanted
      missing = set(nodenames).difference(all_info.keys())
      if missing:
        raise errors.OpExecError(
          "Some nodes were removed before retrieving their data: %s" % missing)
    else:
      nodenames = all_info.keys()

    nodenames = utils.NiceSort(nodenames)
    nodelist = [all_info[name] for name in nodenames]

    # begin data gathering

    if self.do_locking:
      live_data = {}
      node_data = self.rpc.call_node_info(nodenames, self.cfg.GetVGName(),
                                          self.cfg.GetHypervisorType())
      for name in nodenames:
        nodeinfo = node_data.get(name, None)
        if nodeinfo:
          live_data[name] = {
            "mtotal": utils.TryConvert(int, nodeinfo['memory_total']),
            "mnode": utils.TryConvert(int, nodeinfo['memory_dom0']),
            "mfree": utils.TryConvert(int, nodeinfo['memory_free']),
            "dtotal": utils.TryConvert(int, nodeinfo['vg_size']),
            "dfree": utils.TryConvert(int, nodeinfo['vg_free']),
            "ctotal": utils.TryConvert(int, nodeinfo['cpu_total']),
            "bootid": nodeinfo['bootid'],
            }
        else:
          # node did not answer: dynamic fields will read as None
          live_data[name] = {}
    else:
      # NOTE(review): every name maps to the *same* empty dict object
      # here; this is safe because the values are only read via .get()
      live_data = dict.fromkeys(nodenames, {})

    node_to_primary = dict([(name, set()) for name in nodenames])
    node_to_secondary = dict([(name, set()) for name in nodenames])

    # the instance scan is only needed when instance-related fields
    # were requested
    inst_fields = frozenset(("pinst_cnt", "pinst_list",
                             "sinst_cnt", "sinst_list"))
    if inst_fields & frozenset(self.op.output_fields):
      instancelist = self.cfg.GetInstanceList()

      for instance_name in instancelist:
        inst = self.cfg.GetInstanceInfo(instance_name)
        if inst.primary_node in node_to_primary:
          node_to_primary[inst.primary_node].add(inst.name)
        for secnode in inst.secondary_nodes:
          if secnode in node_to_secondary:
            node_to_secondary[secnode].add(inst.name)

    # end data gathering

    output = []
    for node in nodelist:
      node_output = []
      for field in self.op.output_fields:
        if field == "name":
          val = node.name
        elif field == "pinst_list":
          val = list(node_to_primary[node.name])
        elif field == "sinst_list":
          val = list(node_to_secondary[node.name])
        elif field == "pinst_cnt":
          val = len(node_to_primary[node.name])
        elif field == "sinst_cnt":
          val = len(node_to_secondary[node.name])
        elif field == "pip":
          val = node.primary_ip
        elif field == "sip":
          val = node.secondary_ip
        elif field == "tags":
          val = list(node.GetTags())
        elif field == "serial_no":
          val = node.serial_no
        elif self._FIELDS_DYNAMIC.Matches(field):
          # dynamic fields fall back to None when the node gave no data
          val = live_data[node.name].get(field, None)
        else:
          raise errors.ParameterError(field)
        node_output.append(val)
      output.append(node_output)

    return output
1652 a8083063 Iustin Pop
1653 a8083063 Iustin Pop
1654 dcb93971 Michael Hanselmann
class LUQueryNodeVolumes(NoHooksLU):
  """Logical unit for getting volumes on node(s).

  """
  _OP_REQP = ["nodes", "output_fields"]
  REQ_BGL = False
  # fields filled from the per-node RPC volume data
  _FIELDS_DYNAMIC = _FieldSet("phys", "vg", "name", "size", "instance")
  # fields answerable from the configuration alone
  _FIELDS_STATIC = _FieldSet("node")

  def ExpandNames(self):
    """Validate the output fields and compute the needed node locks.

    If no nodes were requested, all nodes are locked (shared).

    """
    _CheckOutputFields(static=self._FIELDS_STATIC,
                       dynamic=self._FIELDS_DYNAMIC,
                       selected=self.op.output_fields)

    self.needed_locks = {}
    self.share_locks[locking.LEVEL_NODE] = 1
    if not self.op.nodes:
      self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
    else:
      self.needed_locks[locking.LEVEL_NODE] = \
        _GetWantedNodes(self, self.op.nodes)

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the fields required are valid output fields.

    """
    # the list of nodes is exactly the set of node locks we acquired
    self.nodes = self.acquired_locks[locking.LEVEL_NODE]

  def Exec(self, feedback_fn):
    """Computes the list of nodes and their attributes.

    Returns a list of rows (one per volume), each row being the values
    of the requested output fields, all stringified.

    """
    nodenames = self.nodes
    volumes = self.rpc.call_node_volumes(nodenames)

    ilist = [self.cfg.GetInstanceInfo(iname) for iname
             in self.cfg.GetInstanceList()]

    # map instance -> {node: [lv names]} for reverse volume lookup
    lv_by_node = dict([(inst, inst.MapLVsByNode()) for inst in ilist])

    output = []
    for node in nodenames:
      # skip nodes that failed the RPC or have no volumes
      if node not in volumes or not volumes[node]:
        continue

      node_vols = volumes[node][:]
      node_vols.sort(key=lambda vol: vol['dev'])

      for vol in node_vols:
        node_output = []
        for field in self.op.output_fields:
          if field == "node":
            val = node
          elif field == "phys":
            val = vol['dev']
          elif field == "vg":
            val = vol['vg']
          elif field == "name":
            val = vol['name']
          elif field == "size":
            val = int(float(vol['size']))
          elif field == "instance":
            # find the instance owning this LV, if any
            for inst in ilist:
              if node not in lv_by_node[inst]:
                continue
              if vol['name'] in lv_by_node[inst][node]:
                val = inst.name
                break
            else:
              # no instance claimed this volume
              val = '-'
          else:
            raise errors.ParameterError(field)
          node_output.append(str(val))

        output.append(node_output)

    return output
1733 dcb93971 Michael Hanselmann
1734 dcb93971 Michael Hanselmann
1735 a8083063 Iustin Pop
class LUAddNode(LogicalUnit):
  """Logical unit for adding node to the cluster.

  """
  HPATH = "node-add"
  HTYPE = constants.HTYPE_NODE
  _OP_REQP = ["node_name"]

  def BuildHooksEnv(self):
    """Build hooks env.

    This will run on all nodes before, and on all nodes + the new node after.

    """
    env = {
      "OP_TARGET": self.op.node_name,
      "NODE_NAME": self.op.node_name,
      "NODE_PIP": self.op.primary_ip,
      "NODE_SIP": self.op.secondary_ip,
      }
    # pre-hooks run on the current nodes, post-hooks also on the new one
    nodes_0 = self.cfg.GetNodeList()
    nodes_1 = nodes_0 + [self.op.node_name, ]
    return env, nodes_0, nodes_1

  def CheckPrereq(self):
    """Check prerequisites.

    This checks:
     - the new node is not already in the config
     - it is resolvable
     - its parameters (single/dual homed) matches the cluster

    Any errors are signalled by raising errors.OpPrereqError.

    """
    node_name = self.op.node_name
    cfg = self.cfg

    # resolve the name; raises if the host is unknown to DNS
    dns_data = utils.HostInfo(node_name)

    node = dns_data.name
    primary_ip = self.op.primary_ip = dns_data.ip
    secondary_ip = getattr(self.op, "secondary_ip", None)
    if secondary_ip is None:
      # single-homed node: secondary defaults to the primary address
      secondary_ip = primary_ip
    if not utils.IsValidIP(secondary_ip):
      raise errors.OpPrereqError("Invalid secondary IP given")
    self.op.secondary_ip = secondary_ip

    node_list = cfg.GetNodeList()
    if not self.op.readd and node in node_list:
      raise errors.OpPrereqError("Node %s is already in the configuration" %
                                 node)
    elif self.op.readd and node not in node_list:
      raise errors.OpPrereqError("Node %s is not in the configuration" % node)

    for existing_node_name in node_list:
      existing_node = cfg.GetNodeInfo(existing_node_name)

      if self.op.readd and node == existing_node_name:
        # a readd must keep the node's previous addresses
        if (existing_node.primary_ip != primary_ip or
            existing_node.secondary_ip != secondary_ip):
          raise errors.OpPrereqError("Readded node doesn't have the same IP"
                                     " address configuration as before")
        continue

      # no address of the new node may clash with an existing node
      if (existing_node.primary_ip == primary_ip or
          existing_node.secondary_ip == primary_ip or
          existing_node.primary_ip == secondary_ip or
          existing_node.secondary_ip == secondary_ip):
        raise errors.OpPrereqError("New node ip address(es) conflict with"
                                   " existing node %s" % existing_node.name)

    # check that the type of the node (single versus dual homed) is the
    # same as for the master
    myself = cfg.GetNodeInfo(self.cfg.GetMasterNode())
    master_singlehomed = myself.secondary_ip == myself.primary_ip
    newbie_singlehomed = secondary_ip == primary_ip
    if master_singlehomed != newbie_singlehomed:
      if master_singlehomed:
        raise errors.OpPrereqError("The master has no private ip but the"
                                   " new node has one")
      else:
        raise errors.OpPrereqError("The master has a private ip but the"
                                   " new node doesn't have one")

    # checks reachablity
    if not utils.TcpPing(primary_ip, constants.DEFAULT_NODED_PORT):
      raise errors.OpPrereqError("Node not reachable by ping")

    if not newbie_singlehomed:
      # check reachability from my secondary ip to newbie's secondary ip
      if not utils.TcpPing(secondary_ip, constants.DEFAULT_NODED_PORT,
                           source=myself.secondary_ip):
        raise errors.OpPrereqError("Node secondary ip not reachable by TCP"
                                   " based ping to noded port")

    self.new_node = objects.Node(name=node,
                                 primary_ip=primary_ip,
                                 secondary_ip=secondary_ip)

  def Exec(self, feedback_fn):
    """Adds the new node to the cluster.

    """
    new_node = self.new_node
    node = new_node.name

    # check connectivity
    result = self.rpc.call_version([node])[node]
    if result:
      if constants.PROTOCOL_VERSION == result:
        logging.info("Communication to node %s fine, sw version %s match",
                     node, result)
      else:
        raise errors.OpExecError("Version mismatch master version %s,"
                                 " node version %s" %
                                 (constants.PROTOCOL_VERSION, result))
    else:
      raise errors.OpExecError("Cannot get version from the new node")

    # setup ssh on node
    logging.info("Copy ssh key to node %s", node)
    priv_key, pub_key, _ = ssh.GetUserFiles(constants.GANETI_RUNAS)
    keyarray = []
    # host DSA/RSA keypairs plus the cluster ssh user's keypair
    keyfiles = [constants.SSH_HOST_DSA_PRIV, constants.SSH_HOST_DSA_PUB,
                constants.SSH_HOST_RSA_PRIV, constants.SSH_HOST_RSA_PUB,
                priv_key, pub_key]

    for i in keyfiles:
      f = open(i, 'r')
      try:
        keyarray.append(f.read())
      finally:
        f.close()

    result = self.rpc.call_node_add(node, keyarray[0], keyarray[1],
                                    keyarray[2],
                                    keyarray[3], keyarray[4], keyarray[5])

    if not result:
      raise errors.OpExecError("Cannot transfer ssh keys to the new node")

    # Add node to our /etc/hosts, and add key to known_hosts
    utils.AddHostToEtcHosts(new_node.name)

    if new_node.secondary_ip != new_node.primary_ip:
      # the node must actually own the secondary address it claims
      if not self.rpc.call_node_has_ip_address(new_node.name,
                                               new_node.secondary_ip):
        raise errors.OpExecError("Node claims it doesn't have the secondary ip"
                                 " you gave (%s). Please fix and re-run this"
                                 " command." % new_node.secondary_ip)

    # ask the master node to verify ssh/hostname setup towards the new node
    node_verify_list = [self.cfg.GetMasterNode()]
    node_verify_param = {
      'nodelist': [node],
      # TODO: do a node-net-test as well?
    }

    result = self.rpc.call_node_verify(node_verify_list, node_verify_param,
                                       self.cfg.GetClusterName())
    for verifier in node_verify_list:
      if not result[verifier]:
        raise errors.OpExecError("Cannot communicate with %s's node daemon"
                                 " for remote verification" % verifier)
      if result[verifier]['nodelist']:
        for failed in result[verifier]['nodelist']:
          feedback_fn("ssh/hostname verification failed %s -> %s" %
                      (verifier, result[verifier]['nodelist'][failed]))
        raise errors.OpExecError("ssh/hostname verification failed.")

    # Distribute updated /etc/hosts and known_hosts to all nodes,
    # including the node just added
    myself = self.cfg.GetNodeInfo(self.cfg.GetMasterNode())
    dist_nodes = self.cfg.GetNodeList()
    if not self.op.readd:
      dist_nodes.append(node)
    if myself.name in dist_nodes:
      # the master already has the updated files
      dist_nodes.remove(myself.name)

    logging.debug("Copying hosts and known_hosts to all nodes")
    for fname in (constants.ETC_HOSTS, constants.SSH_KNOWN_HOSTS_FILE):
      result = self.rpc.call_upload_file(dist_nodes, fname)
      for to_node in dist_nodes:
        if not result[to_node]:
          logging.error("Copy of file %s to node %s failed", fname, to_node)

    to_copy = []
    if constants.HT_XEN_HVM in self.cfg.GetClusterInfo().enabled_hypervisors:
      # HVM clusters additionally need the VNC password file
      to_copy.append(constants.VNC_PASSWORD_FILE)
    for fname in to_copy:
      result = self.rpc.call_upload_file([node], fname)
      if not result[node]:
        logging.error("Could not copy file %s to node %s", fname, node)

    if self.op.readd:
      self.context.ReaddNode(new_node)
    else:
      self.context.AddNode(new_node)
1934 a8083063 Iustin Pop
1935 a8083063 Iustin Pop
1936 a8083063 Iustin Pop
class LUQueryClusterInfo(NoHooksLU):
  """Query cluster configuration.

  """
  _OP_REQP = []
  REQ_MASTER = False
  REQ_BGL = False

  def ExpandNames(self):
    # read-only summary over the config; nothing needs locking
    self.needed_locks = {}

  def CheckPrereq(self):
    """No prerequisites needed for this LU.

    """
    pass

  def Exec(self, feedback_fn):
    """Return cluster config.

    """
    cluster = self.cfg.GetClusterInfo()
    arch = (platform.architecture()[0], platform.machine())
    # assemble the summary dictionary key by key
    result = {}
    result["software_version"] = constants.RELEASE_VERSION
    result["protocol_version"] = constants.PROTOCOL_VERSION
    result["config_version"] = constants.CONFIG_VERSION
    result["os_api_version"] = constants.OS_API_VERSION
    result["export_version"] = constants.EXPORT_VERSION
    result["architecture"] = arch
    result["name"] = cluster.cluster_name
    result["master"] = cluster.master_node
    result["default_hypervisor"] = cluster.default_hypervisor
    result["enabled_hypervisors"] = cluster.enabled_hypervisors
    result["hvparams"] = cluster.hvparams
    result["beparams"] = cluster.beparams

    return result
1974 a8083063 Iustin Pop
1975 a8083063 Iustin Pop
1976 ae5849b5 Michael Hanselmann
class LUQueryConfigValues(NoHooksLU):
  """Return configuration values.

  """
  _OP_REQP = []
  REQ_BGL = False
  _FIELDS_DYNAMIC = _FieldSet()
  _FIELDS_STATIC = _FieldSet("cluster_name", "master_node", "drain_flag")

  def ExpandNames(self):
    self.needed_locks = {}

    _CheckOutputFields(static=self._FIELDS_STATIC,
                       dynamic=self._FIELDS_DYNAMIC,
                       selected=self.op.output_fields)

  def CheckPrereq(self):
    """No prerequisites.

    """
    pass

  def Exec(self, feedback_fn):
    """Dump a representation of the cluster config to the standard output.

    """
    # dispatch table: field name -> zero-argument getter
    getters = {
      "cluster_name": self.cfg.GetClusterName,
      "master_node": self.cfg.GetMasterNode,
      "drain_flag": lambda: os.path.exists(constants.JOB_QUEUE_DRAIN_FILE),
      }
    values = []
    for field in self.op.output_fields:
      if field not in getters:
        raise errors.ParameterError(field)
      values.append(getters[field]())
    return values
2014 a8083063 Iustin Pop
2015 a8083063 Iustin Pop
2016 a8083063 Iustin Pop
class LUActivateInstanceDisks(NoHooksLU):
  """Bring up an instance's disks.

  """
  _OP_REQP = ["instance_name"]
  REQ_BGL = False

  def ExpandNames(self):
    # lock the instance now; its nodes are only known (and locked)
    # later, in DeclareLocks
    self._ExpandAndLockInstance()
    self.needed_locks[locking.LEVEL_NODE] = []
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE

  def DeclareLocks(self, level):
    if level != locking.LEVEL_NODE:
      return
    self._LockInstancesNodes()

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster.

    """
    instance = self.cfg.GetInstanceInfo(self.op.instance_name)
    assert instance is not None, \
      "Cannot retrieve locked instance %s" % self.op.instance_name
    self.instance = instance

  def Exec(self, feedback_fn):
    """Activate the disks.

    """
    disks_ok, disks_info = _AssembleInstanceDisks(self, self.instance)
    if disks_ok:
      return disks_info
    raise errors.OpExecError("Cannot activate block devices")
2051 a8083063 Iustin Pop
2052 a8083063 Iustin Pop
2053 b9bddb6b Iustin Pop
def _AssembleInstanceDisks(lu, instance, ignore_secondaries=False):
  """Prepare the block devices for an instance.

  This sets up the block devices on all nodes.

  Args:
    lu: the LogicalUnit on whose behalf we execute (provides cfg, rpc
        and proc)
    instance: a ganeti.objects.Instance object
    ignore_secondaries: if true, errors on secondary nodes won't result
                        in an error return from the function

  Returns:
    a (disks_ok, device_info) tuple; disks_ok is False if any assembly
    failed (subject to ignore_secondaries), device_info is a list of
    (host, instance_visible_name, node_visible_name) tuples with the
    mapping from node devices to instance devices
  """
  device_info = []
  disks_ok = True
  iname = instance.name
  # With the two passes mechanism we try to reduce the window of
  # opportunity for the race condition of switching DRBD to primary
  # before handshaking occured, but we do not eliminate it

  # The proper fix would be to wait (with some limits) until the
  # connection has been made and drbd transitions from WFConnection
  # into any other network-connected state (Connected, SyncTarget,
  # SyncSource, etc.)

  # 1st pass, assemble on all nodes in secondary mode
  for inst_disk in instance.disks:
    for node, node_disk in inst_disk.ComputeNodeTree(instance.primary_node):
      lu.cfg.SetDiskID(node_disk, node)
      result = lu.rpc.call_blockdev_assemble(node, node_disk, iname, False)
      if not result:
        lu.proc.LogWarning("Could not prepare block device %s on node %s"
                           " (is_primary=False, pass=1)",
                           inst_disk.iv_name, node)
        if not ignore_secondaries:
          disks_ok = False

  # FIXME: race condition on drbd migration to primary

  # 2nd pass, do only the primary node
  for inst_disk in instance.disks:
    for node, node_disk in inst_disk.ComputeNodeTree(instance.primary_node):
      if node != instance.primary_node:
        continue
      lu.cfg.SetDiskID(node_disk, node)
      result = lu.rpc.call_blockdev_assemble(node, node_disk, iname, True)
      if not result:
        lu.proc.LogWarning("Could not prepare block device %s on node %s"
                           " (is_primary=True, pass=2)",
                           inst_disk.iv_name, node)
        disks_ok = False
    # record the primary-node assembly result for this disk
    device_info.append((instance.primary_node, inst_disk.iv_name, result))

  # leave the disks configured for the primary node
  # this is a workaround that would be fixed better by
  # improving the logical/physical id handling
  for disk in instance.disks:
    lu.cfg.SetDiskID(disk, instance.primary_node)

  return disks_ok, device_info
2115 a8083063 Iustin Pop
2116 a8083063 Iustin Pop
2117 b9bddb6b Iustin Pop
def _StartInstanceDisks(lu, instance, force):
  """Start the disks of an instance.

  Assembles the instance's disks; on failure the already-assembled
  devices are shut down again and OpExecError is raised.

  """
  assembled, _ = _AssembleInstanceDisks(lu, instance,
                                        ignore_secondaries=force)
  if assembled:
    return
  # roll back any devices brought up before reporting the error
  _ShutdownInstanceDisks(lu, instance)
  if force is not None and not force:
    lu.proc.LogWarning("", hint="If the message above refers to a"
                       " secondary node,"
                       " you can retry the operation using '--force'.")
  raise errors.OpExecError("Disk consistency error")
2130 fe7b0351 Michael Hanselmann
2131 fe7b0351 Michael Hanselmann
2132 a8083063 Iustin Pop
class LUDeactivateInstanceDisks(NoHooksLU):
  """Shutdown an instance's disks.

  """
  _OP_REQP = ["instance_name"]
  REQ_BGL = False

  def ExpandNames(self):
    # lock the instance now; its nodes are only known (and locked)
    # later, in DeclareLocks
    self._ExpandAndLockInstance()
    self.needed_locks[locking.LEVEL_NODE] = []
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE

  def DeclareLocks(self, level):
    if level != locking.LEVEL_NODE:
      return
    self._LockInstancesNodes()

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster.

    """
    instance = self.cfg.GetInstanceInfo(self.op.instance_name)
    assert instance is not None, \
      "Cannot retrieve locked instance %s" % self.op.instance_name
    self.instance = instance

  def Exec(self, feedback_fn):
    """Deactivate the disks

    """
    _SafeShutdownInstanceDisks(self, self.instance)
2165 a8083063 Iustin Pop
2166 b9bddb6b Iustin Pop
def _SafeShutdownInstanceDisks(lu, instance):
  """Shutdown block devices of an instance.

  This function checks if an instance is running, before calling
  _ShutdownInstanceDisks.

  Raises OpExecError if the primary node cannot be contacted or if the
  instance is still running there.

  """
  ins_l = lu.rpc.call_instance_list([instance.primary_node],
                                      [instance.hypervisor])
  ins_l = ins_l[instance.primary_node]
  # a non-list answer means the RPC to the primary node failed
  if not isinstance(ins_l, list):
    raise errors.OpExecError("Can't contact node '%s'" %
                             instance.primary_node)

  if instance.name in ins_l:
    # refuse to pull the disks from under a running instance
    raise errors.OpExecError("Instance is running, can't shutdown"
                             " block devices.")

  _ShutdownInstanceDisks(lu, instance)
2185 a8083063 Iustin Pop
2186 a8083063 Iustin Pop
2187 b9bddb6b Iustin Pop
def _ShutdownInstanceDisks(lu, instance, ignore_primary=False):
2188 a8083063 Iustin Pop
  """Shutdown block devices of an instance.
2189 a8083063 Iustin Pop

2190 a8083063 Iustin Pop
  This does the shutdown on all nodes of the instance.
2191 a8083063 Iustin Pop

2192 a8083063 Iustin Pop
  If the ignore_primary is false, errors on the primary node are
2193 a8083063 Iustin Pop
  ignored.
2194 a8083063 Iustin Pop

2195 a8083063 Iustin Pop
  """
2196 a8083063 Iustin Pop
  result = True
2197 a8083063 Iustin Pop
  for disk in instance.disks:
2198 a8083063 Iustin Pop
    for node, top_disk in disk.ComputeNodeTree(instance.primary_node):
2199 b9bddb6b Iustin Pop
      lu.cfg.SetDiskID(top_disk, node)
2200 72737a7f Iustin Pop
      if not lu.rpc.call_blockdev_shutdown(node, top_disk):
2201 9a4f63d1 Iustin Pop
        logging.error("Could not shutdown block device %s on node %s",
2202 9a4f63d1 Iustin Pop
                      disk.iv_name, node)
2203 a8083063 Iustin Pop
        if not ignore_primary or node != instance.primary_node:
2204 a8083063 Iustin Pop
          result = False
2205 a8083063 Iustin Pop
  return result
2206 a8083063 Iustin Pop
2207 a8083063 Iustin Pop
2208 b9bddb6b Iustin Pop
def _CheckNodeFreeMemory(lu, node, reason, requested, hypervisor):
2209 d4f16fd9 Iustin Pop
  """Checks if a node has enough free memory.
2210 d4f16fd9 Iustin Pop

2211 d4f16fd9 Iustin Pop
  This function check if a given node has the needed amount of free
2212 d4f16fd9 Iustin Pop
  memory. In case the node has less memory or we cannot get the
2213 d4f16fd9 Iustin Pop
  information from the node, this function raise an OpPrereqError
2214 d4f16fd9 Iustin Pop
  exception.
2215 d4f16fd9 Iustin Pop

2216 b9bddb6b Iustin Pop
  @type lu: C{LogicalUnit}
2217 b9bddb6b Iustin Pop
  @param lu: a logical unit from which we get configuration data
2218 e69d05fd Iustin Pop
  @type node: C{str}
2219 e69d05fd Iustin Pop
  @param node: the node to check
2220 e69d05fd Iustin Pop
  @type reason: C{str}
2221 e69d05fd Iustin Pop
  @param reason: string to use in the error message
2222 e69d05fd Iustin Pop
  @type requested: C{int}
2223 e69d05fd Iustin Pop
  @param requested: the amount of memory in MiB to check for
2224 e69d05fd Iustin Pop
  @type hypervisor: C{str}
2225 e69d05fd Iustin Pop
  @param hypervisor: the hypervisor to ask for memory stats
2226 e69d05fd Iustin Pop
  @raise errors.OpPrereqError: if the node doesn't have enough memory, or
2227 e69d05fd Iustin Pop
      we cannot check the node
2228 d4f16fd9 Iustin Pop

2229 d4f16fd9 Iustin Pop
  """
2230 72737a7f Iustin Pop
  nodeinfo = lu.rpc.call_node_info([node], lu.cfg.GetVGName(), hypervisor)
2231 d4f16fd9 Iustin Pop
  if not nodeinfo or not isinstance(nodeinfo, dict):
2232 d4f16fd9 Iustin Pop
    raise errors.OpPrereqError("Could not contact node %s for resource"
2233 d4f16fd9 Iustin Pop
                             " information" % (node,))
2234 d4f16fd9 Iustin Pop
2235 d4f16fd9 Iustin Pop
  free_mem = nodeinfo[node].get('memory_free')
2236 d4f16fd9 Iustin Pop
  if not isinstance(free_mem, int):
2237 d4f16fd9 Iustin Pop
    raise errors.OpPrereqError("Can't compute free memory on node %s, result"
2238 d4f16fd9 Iustin Pop
                             " was '%s'" % (node, free_mem))
2239 d4f16fd9 Iustin Pop
  if requested > free_mem:
2240 d4f16fd9 Iustin Pop
    raise errors.OpPrereqError("Not enough memory on node %s for %s:"
2241 d4f16fd9 Iustin Pop
                             " needed %s MiB, available %s MiB" %
2242 d4f16fd9 Iustin Pop
                             (node, reason, requested, free_mem))
2243 d4f16fd9 Iustin Pop
2244 d4f16fd9 Iustin Pop
2245 a8083063 Iustin Pop
class LUStartupInstance(LogicalUnit):
  """Starts an instance.

  """
  HPATH = "instance-start"
  HTYPE = constants.HTYPE_INSTANCE
  # parameters the opcode must carry
  _OP_REQP = ["instance_name", "force"]
  # we do our own fine-grained locking, no big ganeti lock needed
  REQ_BGL = False

  def ExpandNames(self):
    """Expand and lock the instance; node locks are computed later.

    """
    self._ExpandAndLockInstance()
    # the actual node locks are declared in DeclareLocks, once the
    # instance lock is held and its node list is known
    self.needed_locks[locking.LEVEL_NODE] = []
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE

  def DeclareLocks(self, level):
    """Lock all nodes of the (already locked) instance.

    """
    if level == locking.LEVEL_NODE:
      self._LockInstancesNodes()

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and secondary nodes of the instance.

    """
    env = {
      "FORCE": self.op.force,
      }
    env.update(_BuildInstanceHookEnvByObject(self, self.instance))
    # node list: master plus all of the instance's nodes
    nl = ([self.cfg.GetMasterNode(), self.instance.primary_node] +
          list(self.instance.secondary_nodes))
    return env, nl, nl

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster.

    """
    self.instance = instance = self.cfg.GetInstanceInfo(self.op.instance_name)
    assert self.instance is not None, \
      "Cannot retrieve locked instance %s" % self.op.instance_name

    # effective backend parameters (instance values over cluster defaults)
    bep = self.cfg.GetClusterInfo().FillBE(instance)
    # check bridges existence
    _CheckInstanceBridgesExist(self, instance)

    # verify the primary node can host the instance's memory
    _CheckNodeFreeMemory(self, instance.primary_node,
                         "starting instance %s" % instance.name,
                         bep[constants.BE_MEMORY], instance.hypervisor)

  def Exec(self, feedback_fn):
    """Start the instance.

    """
    instance = self.instance
    force = self.op.force
    extra_args = getattr(self.op, "extra_args", "")

    # record the desired state before acting on the node
    self.cfg.MarkInstanceUp(instance.name)

    node_current = instance.primary_node

    _StartInstanceDisks(self, instance, force)

    if not self.rpc.call_instance_start(node_current, instance, extra_args):
      # roll back the disk activation if the start itself failed
      _ShutdownInstanceDisks(self, instance)
      raise errors.OpExecError("Could not start instance")
2312 a8083063 Iustin Pop
2313 a8083063 Iustin Pop
2314 bf6929a2 Alexander Schreiber
class LURebootInstance(LogicalUnit):
  """Reboot an instance.

  """
  HPATH = "instance-reboot"
  HTYPE = constants.HTYPE_INSTANCE
  _OP_REQP = ["instance_name", "ignore_secondaries", "reboot_type"]
  REQ_BGL = False

  def ExpandNames(self):
    """Validate the reboot type and set up the locking.

    """
    # reject invalid reboot types before taking any locks
    if self.op.reboot_type not in [constants.INSTANCE_REBOOT_SOFT,
                                   constants.INSTANCE_REBOOT_HARD,
                                   constants.INSTANCE_REBOOT_FULL]:
      raise errors.ParameterError("reboot type not in [%s, %s, %s]" %
                                  (constants.INSTANCE_REBOOT_SOFT,
                                   constants.INSTANCE_REBOOT_HARD,
                                   constants.INSTANCE_REBOOT_FULL))
    self._ExpandAndLockInstance()
    self.needed_locks[locking.LEVEL_NODE] = []
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE

  def DeclareLocks(self, level):
    """Lock the instance's nodes; all of them only for a full reboot.

    """
    if level == locking.LEVEL_NODE:
      # only a full reboot touches the disks and thus needs the
      # secondary nodes locked; the previous expression
      # ("not constants.INSTANCE_REBOOT_FULL") negated a constant and
      # was always False, so all nodes were locked for every reboot type
      primary_only = self.op.reboot_type != constants.INSTANCE_REBOOT_FULL
      self._LockInstancesNodes(primary_only=primary_only)

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and secondary nodes of the instance.

    """
    env = {
      "IGNORE_SECONDARIES": self.op.ignore_secondaries,
      }
    env.update(_BuildInstanceHookEnvByObject(self, self.instance))
    nl = ([self.cfg.GetMasterNode(), self.instance.primary_node] +
          list(self.instance.secondary_nodes))
    return env, nl, nl

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster.

    """
    self.instance = instance = self.cfg.GetInstanceInfo(self.op.instance_name)
    assert self.instance is not None, \
      "Cannot retrieve locked instance %s" % self.op.instance_name

    # check bridges existence
    _CheckInstanceBridgesExist(self, instance)

  def Exec(self, feedback_fn):
    """Reboot the instance.

    """
    instance = self.instance
    ignore_secondaries = self.op.ignore_secondaries
    reboot_type = self.op.reboot_type
    extra_args = getattr(self.op, "extra_args", "")

    node_current = instance.primary_node

    if reboot_type in [constants.INSTANCE_REBOOT_SOFT,
                       constants.INSTANCE_REBOOT_HARD]:
      # soft/hard reboots are handled entirely by the hypervisor
      if not self.rpc.call_instance_reboot(node_current, instance,
                                           reboot_type, extra_args):
        raise errors.OpExecError("Could not reboot instance")
    else:
      # full reboot: stop the instance, recycle its disks, start it again
      if not self.rpc.call_instance_shutdown(node_current, instance):
        raise errors.OpExecError("could not shutdown instance for full reboot")
      _ShutdownInstanceDisks(self, instance)
      _StartInstanceDisks(self, instance, ignore_secondaries)
      if not self.rpc.call_instance_start(node_current, instance, extra_args):
        _ShutdownInstanceDisks(self, instance)
        raise errors.OpExecError("Could not start instance for full reboot")

    self.cfg.MarkInstanceUp(instance.name)
2393 bf6929a2 Alexander Schreiber
2394 bf6929a2 Alexander Schreiber
2395 a8083063 Iustin Pop
class LUShutdownInstance(LogicalUnit):
  """Shutdown an instance.

  """
  HPATH = "instance-stop"
  HTYPE = constants.HTYPE_INSTANCE
  _OP_REQP = ["instance_name"]
  REQ_BGL = False

  def ExpandNames(self):
    """Expand and lock the instance; node locks come later.

    """
    self._ExpandAndLockInstance()
    self.needed_locks[locking.LEVEL_NODE] = []
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE

  def DeclareLocks(self, level):
    """Lock all nodes of the (already locked) instance.

    """
    if level == locking.LEVEL_NODE:
      self._LockInstancesNodes()

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and secondary nodes of the instance.

    """
    env = _BuildInstanceHookEnvByObject(self, self.instance)
    nl = [self.cfg.GetMasterNode(), self.instance.primary_node]
    nl.extend(self.instance.secondary_nodes)
    return env, nl, nl

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster.

    """
    self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
    assert self.instance is not None, \
      "Cannot retrieve locked instance %s" % self.op.instance_name

  def Exec(self, feedback_fn):
    """Shutdown the instance.

    """
    inst = self.instance
    # record the desired state first, then talk to the primary node
    self.cfg.MarkInstanceDown(inst.name)
    if not self.rpc.call_instance_shutdown(inst.primary_node, inst):
      self.proc.LogWarning("Could not shutdown instance")

    _ShutdownInstanceDisks(self, inst)
2445 a8083063 Iustin Pop
2446 a8083063 Iustin Pop
2447 fe7b0351 Michael Hanselmann
class LUReinstallInstance(LogicalUnit):
  """Reinstall an instance.

  """
  HPATH = "instance-reinstall"
  HTYPE = constants.HTYPE_INSTANCE
  _OP_REQP = ["instance_name"]
  REQ_BGL = False

  def ExpandNames(self):
    """Expand and lock the instance; node locks are computed later.

    """
    self._ExpandAndLockInstance()
    self.needed_locks[locking.LEVEL_NODE] = []
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE

  def DeclareLocks(self, level):
    """Lock all nodes of the (already locked) instance.

    """
    if level == locking.LEVEL_NODE:
      self._LockInstancesNodes()

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and secondary nodes of the instance.

    """
    env = _BuildInstanceHookEnvByObject(self, self.instance)
    nl = ([self.cfg.GetMasterNode(), self.instance.primary_node] +
          list(self.instance.secondary_nodes))
    return env, nl, nl

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster and is not running.

    """
    instance = self.cfg.GetInstanceInfo(self.op.instance_name)
    assert instance is not None, \
      "Cannot retrieve locked instance %s" % self.op.instance_name

    # a diskless instance has no OS to reinstall
    if instance.disk_template == constants.DT_DISKLESS:
      raise errors.OpPrereqError("Instance '%s' has no disks" %
                                 self.op.instance_name)
    if instance.status != "down":
      raise errors.OpPrereqError("Instance '%s' is marked to be up" %
                                 self.op.instance_name)
    # even if marked down in the config, refuse if the hypervisor
    # actually reports the instance as running
    remote_info = self.rpc.call_instance_info(instance.primary_node,
                                              instance.name,
                                              instance.hypervisor)
    if remote_info:
      raise errors.OpPrereqError("Instance '%s' is running on the node %s" %
                                 (self.op.instance_name,
                                  instance.primary_node))

    # os_type is optional; when given, switch the instance to that OS
    self.op.os_type = getattr(self.op, "os_type", None)
    if self.op.os_type is not None:
      # OS verification
      pnode = self.cfg.GetNodeInfo(
        self.cfg.ExpandNodeName(instance.primary_node))
      if pnode is None:
        # NOTE(review): self.op.pnode is not among this opcode's declared
        # parameters (_OP_REQP); this error path probably means
        # instance.primary_node -- confirm
        raise errors.OpPrereqError("Primary node '%s' is unknown" %
                                   self.op.pnode)
      os_obj = self.rpc.call_os_get(pnode.name, self.op.os_type)
      if not os_obj:
        raise errors.OpPrereqError("OS '%s' not in supported OS list for"
                                   " primary node"  % self.op.os_type)

    self.instance = instance

  def Exec(self, feedback_fn):
    """Reinstall the instance.

    """
    inst = self.instance

    if self.op.os_type is not None:
      # record the new OS in the configuration before reinstalling
      feedback_fn("Changing OS to '%s'..." % self.op.os_type)
      inst.os = self.op.os_type
      self.cfg.Update(inst)

    _StartInstanceDisks(self, inst, None)
    try:
      feedback_fn("Running the instance OS create scripts...")
      if not self.rpc.call_instance_os_add(inst.primary_node, inst):
        raise errors.OpExecError("Could not install OS for instance %s"
                                 " on node %s" %
                                 (inst.name, inst.primary_node))
    finally:
      # always deactivate the disks again, even if the install failed
      _ShutdownInstanceDisks(self, inst)
2535 fe7b0351 Michael Hanselmann
2536 fe7b0351 Michael Hanselmann
2537 decd5f45 Iustin Pop
class LURenameInstance(LogicalUnit):
  """Rename an instance.

  """
  HPATH = "instance-rename"
  HTYPE = constants.HTYPE_INSTANCE
  _OP_REQP = ["instance_name", "new_name"]
  # NOTE(review): unlike the neighbouring instance LUs, this one defines
  # neither REQ_BGL = False nor ExpandNames, so it presumably runs under
  # the big ganeti lock (the comment in Exec relies on holding the BGL)
  # -- confirm before changing the locking

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and secondary nodes of the instance.

    """
    env = _BuildInstanceHookEnvByObject(self, self.instance)
    env["INSTANCE_NEW_NAME"] = self.op.new_name
    nl = ([self.cfg.GetMasterNode(), self.instance.primary_node] +
          list(self.instance.secondary_nodes))
    return env, nl, nl

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster and is not running.

    """
    instance = self.cfg.GetInstanceInfo(
      self.cfg.ExpandInstanceName(self.op.instance_name))
    if instance is None:
      raise errors.OpPrereqError("Instance '%s' not known" %
                                 self.op.instance_name)
    if instance.status != "down":
      raise errors.OpPrereqError("Instance '%s' is marked to be up" %
                                 self.op.instance_name)
    # even if marked down in the config, refuse if the hypervisor
    # actually reports the instance as running
    remote_info = self.rpc.call_instance_info(instance.primary_node,
                                              instance.name,
                                              instance.hypervisor)
    if remote_info:
      raise errors.OpPrereqError("Instance '%s' is running on the node %s" %
                                 (self.op.instance_name,
                                  instance.primary_node))
    self.instance = instance

    # new name verification
    name_info = utils.HostInfo(self.op.new_name)

    # normalize the requested name to its resolved form
    self.op.new_name = new_name = name_info.name
    instance_list = self.cfg.GetInstanceList()
    if new_name in instance_list:
      raise errors.OpPrereqError("Instance '%s' is already in the cluster" %
                                 new_name)

    # unless told otherwise, make sure the new name's IP is not in use
    if not getattr(self.op, "ignore_ip", False):
      if utils.TcpPing(name_info.ip, constants.DEFAULT_NODED_PORT):
        raise errors.OpPrereqError("IP %s of instance %s already in use" %
                                   (name_info.ip, new_name))


  def Exec(self, feedback_fn):
    """Rename the instance.

    """
    inst = self.instance
    old_name = inst.name

    # for file-based disks, remember the old directory before renaming
    if inst.disk_template == constants.DT_FILE:
      old_file_storage_dir = os.path.dirname(inst.disks[0].logical_id[1])

    self.cfg.RenameInstance(inst.name, self.op.new_name)
    # Change the instance lock. This is definitely safe while we hold the BGL
    self.context.glm.remove(locking.LEVEL_INSTANCE, old_name)
    self.context.glm.add(locking.LEVEL_INSTANCE, self.op.new_name)

    # re-read the instance from the configuration after rename
    inst = self.cfg.GetInstanceInfo(self.op.new_name)

    if inst.disk_template == constants.DT_FILE:
      new_file_storage_dir = os.path.dirname(inst.disks[0].logical_id[1])
      result = self.rpc.call_file_storage_dir_rename(inst.primary_node,
                                                     old_file_storage_dir,
                                                     new_file_storage_dir)

      # result falsy: the RPC call itself failed
      if not result:
        raise errors.OpExecError("Could not connect to node '%s' to rename"
                                 " directory '%s' to '%s' (but the instance"
                                 " has been renamed in Ganeti)" % (
                                 inst.primary_node, old_file_storage_dir,
                                 new_file_storage_dir))

      # result[0] falsy: the node answered, but the rename itself failed
      if not result[0]:
        raise errors.OpExecError("Could not rename directory '%s' to '%s'"
                                 " (but the instance has been renamed in"
                                 " Ganeti)" % (old_file_storage_dir,
                                               new_file_storage_dir))

    _StartInstanceDisks(self, inst, None)
    try:
      # a failing OS rename script is only a warning, not a failure
      if not self.rpc.call_instance_run_rename(inst.primary_node, inst,
                                               old_name):
        msg = ("Could not run OS rename script for instance %s on node %s"
               " (but the instance has been renamed in Ganeti)" %
               (inst.name, inst.primary_node))
        self.proc.LogWarning(msg)
    finally:
      _ShutdownInstanceDisks(self, inst)
2642 decd5f45 Iustin Pop
2643 decd5f45 Iustin Pop
2644 a8083063 Iustin Pop
class LURemoveInstance(LogicalUnit):
  """Remove an instance.

  """
  HPATH = "instance-remove"
  HTYPE = constants.HTYPE_INSTANCE
  _OP_REQP = ["instance_name", "ignore_failures"]
  REQ_BGL = False

  def ExpandNames(self):
    """Expand and lock the instance; node locks are computed later.

    """
    self._ExpandAndLockInstance()
    self.needed_locks[locking.LEVEL_NODE] = []
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE

  def DeclareLocks(self, level):
    """Lock all nodes of the (already locked) instance.

    """
    if level == locking.LEVEL_NODE:
      self._LockInstancesNodes()

  def BuildHooksEnv(self):
    """Build hooks env.

    The hook node list contains only the master node.

    """
    env = _BuildInstanceHookEnvByObject(self, self.instance)
    nl = [self.cfg.GetMasterNode()]
    return env, nl, nl

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster.

    """
    self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
    assert self.instance is not None, \
      "Cannot retrieve locked instance %s" % self.op.instance_name

  def Exec(self, feedback_fn):
    """Remove the instance.

    """
    instance = self.instance
    logging.info("Shutting down instance %s on node %s",
                 instance.name, instance.primary_node)

    # stop the instance first; with ignore_failures a failed shutdown
    # only warns and the removal continues
    if not self.rpc.call_instance_shutdown(instance.primary_node, instance):
      if self.op.ignore_failures:
        feedback_fn("Warning: can't shutdown instance")
      else:
        raise errors.OpExecError("Could not shutdown instance %s on node %s" %
                                 (instance.name, instance.primary_node))

    logging.info("Removing block devices for instance %s", instance.name)

    if not _RemoveDisks(self, instance):
      if self.op.ignore_failures:
        feedback_fn("Warning: can't remove instance's disks")
      else:
        raise errors.OpExecError("Can't remove instance's disks")

    logging.info("Removing instance %s out of cluster config", instance.name)

    self.cfg.RemoveInstance(instance.name)
    # drop the now-stale instance lock together with the config entry
    self.remove_locks[locking.LEVEL_INSTANCE] = instance.name
2709 a8083063 Iustin Pop
2710 a8083063 Iustin Pop
2711 a8083063 Iustin Pop
class LUQueryInstances(NoHooksLU):
  """Logical unit for querying instances.

  Returns, for each selected instance, the values of the requested
  output fields.  Static fields are answered from the configuration
  alone; dynamic fields (oper_state, oper_ram, status) require
  contacting the instances' primary nodes, and therefore locking.

  """
  _OP_REQP = ["output_fields", "names"]
  REQ_BGL = False
  # static fields; the parenthesized entries are regex patterns for
  # indexed fields such as "disk.size/0" or "nic.mac/1"
  # NOTE(review): "admin_ram" and "vcpus" are declared here but have no
  # handler in Exec below, so requesting them raises ParameterError --
  # verify whether they should be served from the beparams
  _FIELDS_STATIC = _FieldSet(*["name", "os", "pnode", "snodes",
                               "admin_state", "admin_ram",
                               "disk_template", "ip", "mac", "bridge",
                               "sda_size", "sdb_size", "vcpus", "tags",
                               "network_port", "beparams",
                               "(disk).(size)/([0-9]+)",
                               "(nic).(mac|ip|bridge)/([0-9]+)",
                               "(disk|nic).(count)",
                               "serial_no", "hypervisor", "hvparams",] +
                             ["hv/%s" % name
                              for name in constants.HVS_PARAMETERS] +
                             ["be/%s" % name
                              for name in constants.BES_PARAMETERS])
  # dynamic fields: need live data from the instances' nodes
  _FIELDS_DYNAMIC = _FieldSet("oper_state", "oper_ram", "status")

  def ExpandNames(self):
    _CheckOutputFields(static=self._FIELDS_STATIC,
                       dynamic=self._FIELDS_DYNAMIC,
                       selected=self.op.output_fields)

    self.needed_locks = {}
    # this is a read-only LU, so shared locks are enough
    self.share_locks[locking.LEVEL_INSTANCE] = 1
    self.share_locks[locking.LEVEL_NODE] = 1

    if self.op.names:
      self.wanted = _GetWantedInstances(self, self.op.names)
    else:
      self.wanted = locking.ALL_SET

    # locking is needed only if at least one requested field is not in
    # the static set (i.e. live data must be fetched from the nodes)
    self.do_locking = self._FIELDS_STATIC.NonMatching(self.op.output_fields)
    if self.do_locking:
      self.needed_locks[locking.LEVEL_INSTANCE] = self.wanted
      self.needed_locks[locking.LEVEL_NODE] = []
      self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE

  def DeclareLocks(self, level):
    # node locks are derived from the instance locks acquired earlier
    if level == locking.LEVEL_NODE and self.do_locking:
      self._LockInstancesNodes()

  def CheckPrereq(self):
    """Check prerequisites.

    Nothing to check: name expansion happens in ExpandNames and
    vanished instances are detected in Exec.

    """
    pass

  def Exec(self, feedback_fn):
    """Computes the list of nodes and their attributes.

    """
    all_info = self.cfg.GetAllInstancesInfo()
    if self.do_locking:
      # the acquired locks are the authoritative instance name list
      instance_names = self.acquired_locks[locking.LEVEL_INSTANCE]
    elif self.wanted != locking.ALL_SET:
      # lockless query for explicit names: some of them may have been
      # removed in the meantime
      instance_names = self.wanted
      missing = set(instance_names).difference(all_info.keys())
      if missing:
        raise errors.OpExecError(
          "Some instances were removed before retrieving their data: %s"
          % missing)
    else:
      instance_names = all_info.keys()

    instance_names = utils.NiceSort(instance_names)
    instance_list = [all_info[iname] for iname in instance_names]

    # begin data gathering

    nodes = frozenset([inst.primary_node for inst in instance_list])
    hv_list = list(set([inst.hypervisor for inst in instance_list]))

    bad_nodes = []
    if self.do_locking:
      live_data = {}
      node_data = self.rpc.call_all_instances_info(nodes, hv_list)
      for name in nodes:
        result = node_data[name]
        if result:
          live_data.update(result)
        elif result == False:
          # the RPC explicitly failed for this node
          bad_nodes.append(name)
        # else no instance is alive
    else:
      # static-only query: fake empty live data for every instance
      live_data = dict([(name, {}) for name in instance_names])

    # end data gathering

    HVPREFIX = "hv/"
    BEPREFIX = "be/"
    output = []
    for instance in instance_list:
      iout = []
      # hypervisor/backend parameters with cluster defaults filled in
      i_hv = self.cfg.GetClusterInfo().FillHV(instance)
      i_be = self.cfg.GetClusterInfo().FillBE(instance)
      for field in self.op.output_fields:
        st_match = self._FIELDS_STATIC.Matches(field)
        if field == "name":
          val = instance.name
        elif field == "os":
          val = instance.os
        elif field == "pnode":
          val = instance.primary_node
        elif field == "snodes":
          val = list(instance.secondary_nodes)
        elif field == "admin_state":
          val = (instance.status != "down")
        elif field == "oper_state":
          # actual run state, unknown (None) if the node is down
          if instance.primary_node in bad_nodes:
            val = None
          else:
            val = bool(live_data.get(instance.name))
        elif field == "status":
          # combined admin/operational status
          if instance.primary_node in bad_nodes:
            val = "ERROR_nodedown"
          else:
            running = bool(live_data.get(instance.name))
            if running:
              if instance.status != "down":
                val = "running"
              else:
                val = "ERROR_up"
            else:
              if instance.status != "down":
                val = "ERROR_down"
              else:
                val = "ADMIN_down"
        elif field == "oper_ram":
          if instance.primary_node in bad_nodes:
            val = None
          elif instance.name in live_data:
            val = live_data[instance.name].get("memory", "?")
          else:
            val = "-"
        elif field == "disk_template":
          val = instance.disk_template
        elif field == "ip":
          val = instance.nics[0].ip
        elif field == "bridge":
          val = instance.nics[0].bridge
        elif field == "mac":
          val = instance.nics[0].mac
        elif field == "sda_size" or field == "sdb_size":
          disk = instance.FindDisk(field[:3])
          if disk is None:
            val = None
          else:
            val = disk.size
        elif field == "tags":
          val = list(instance.GetTags())
        elif field == "serial_no":
          val = instance.serial_no
        elif field == "network_port":
          val = instance.network_port
        elif field == "hypervisor":
          val = instance.hypervisor
        elif field == "hvparams":
          val = i_hv
        elif (field.startswith(HVPREFIX) and
              field[len(HVPREFIX):] in constants.HVS_PARAMETERS):
          val = i_hv.get(field[len(HVPREFIX):], None)
        elif field == "beparams":
          val = i_be
        elif (field.startswith(BEPREFIX) and
              field[len(BEPREFIX):] in constants.BES_PARAMETERS):
          val = i_be.get(field[len(BEPREFIX):], None)
        elif st_match and st_match.groups():
          # matches a variable list
          st_groups = st_match.groups()
          if st_groups and st_groups[0] == "disk":
            if st_groups[1] == "count":
              val = len(instance.disks)
            elif st_groups[1] == "size":
              # out-of-range indices yield None rather than an error
              disk_idx = int(st_groups[2])
              if disk_idx >= len(instance.disks):
                val = None
              else:
                val = instance.disks[disk_idx].size
            else:
              assert False, "Unhandled disk parameter"
          elif st_groups[0] == "nic":
            if st_groups[1] == "count":
              val = len(instance.nics)
            else:
              # index-based item
              nic_idx = int(st_groups[2])
              if nic_idx >= len(instance.nics):
                val = None
              else:
                if st_groups[1] == "mac":
                  val = instance.nics[nic_idx].mac
                elif st_groups[1] == "ip":
                  val = instance.nics[nic_idx].ip
                elif st_groups[1] == "bridge":
                  val = instance.nics[nic_idx].bridge
                else:
                  assert False, "Unhandled NIC parameter"
          else:
            assert False, "Unhandled variable parameter"
        else:
          raise errors.ParameterError(field)
        iout.append(val)
      output.append(iout)

    return output
2921 a8083063 Iustin Pop
2922 a8083063 Iustin Pop
2923 a8083063 Iustin Pop
class LUFailoverInstance(LogicalUnit):
  """Failover an instance.

  Shuts the instance down on its current primary node and starts it on
  its secondary node; only valid for network-mirrored disk templates.

  """
  HPATH = "instance-failover"
  HTYPE = constants.HTYPE_INSTANCE
  _OP_REQP = ["instance_name", "ignore_consistency"]
  REQ_BGL = False

  def ExpandNames(self):
    self._ExpandAndLockInstance()
    # node locks are computed later, from the instance's node list
    self.needed_locks[locking.LEVEL_NODE] = []
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE

  def DeclareLocks(self, level):
    if level == locking.LEVEL_NODE:
      self._LockInstancesNodes()

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and secondary nodes of the instance.

    """
    env = {
      "IGNORE_CONSISTENCY": self.op.ignore_consistency,
      }
    env.update(_BuildInstanceHookEnvByObject(self, self.instance))
    nl = [self.cfg.GetMasterNode()] + list(self.instance.secondary_nodes)
    return env, nl, nl

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster, that its disk
    template supports failover, that the target node has enough free
    memory and that the instance's bridges exist on the target node.

    """
    self.instance = instance = self.cfg.GetInstanceInfo(self.op.instance_name)
    assert self.instance is not None, \
      "Cannot retrieve locked instance %s" % self.op.instance_name

    bep = self.cfg.GetClusterInfo().FillBE(instance)
    if instance.disk_template not in constants.DTS_NET_MIRROR:
      raise errors.OpPrereqError("Instance's disk layout is not"
                                 " network mirrored, cannot failover.")

    secondary_nodes = instance.secondary_nodes
    if not secondary_nodes:
      raise errors.ProgrammerError("no secondary node but using "
                                   "a mirrored disk template")

    target_node = secondary_nodes[0]
    # check memory requirements on the secondary node
    _CheckNodeFreeMemory(self, target_node, "failing over instance %s" %
                         instance.name, bep[constants.BE_MEMORY],
                         instance.hypervisor)

    # check bridge existance
    brlist = [nic.bridge for nic in instance.nics]
    if not self.rpc.call_bridges_exist(target_node, brlist):
      raise errors.OpPrereqError("One or more target bridges %s does not"
                                 " exist on destination node '%s'" %
                                 (brlist, target_node))

  def Exec(self, feedback_fn):
    """Failover an instance.

    The failover is done by shutting it down on its present node and
    starting it on the secondary.

    """
    instance = self.instance

    source_node = instance.primary_node
    target_node = instance.secondary_nodes[0]

    feedback_fn("* checking disk consistency between source and target")
    for dev in instance.disks:
      # for drbd, these are drbd over lvm
      if not _CheckDiskConsistency(self, dev, target_node, False):
        # a degraded disk aborts the failover only for running
        # instances, unless consistency checks are explicitly ignored
        if instance.status == "up" and not self.op.ignore_consistency:
          raise errors.OpExecError("Disk %s is degraded on target node,"
                                   " aborting failover." % dev.iv_name)

    feedback_fn("* shutting down instance on source node")
    logging.info("Shutting down instance %s on node %s",
                 instance.name, source_node)

    if not self.rpc.call_instance_shutdown(source_node, instance):
      # with ignore_consistency we proceed even if the source node
      # could not shut the instance down (e.g. the node is dead)
      if self.op.ignore_consistency:
        self.proc.LogWarning("Could not shutdown instance %s on node %s."
                             " Proceeding"
                             " anyway. Please make sure node %s is down",
                             instance.name, source_node, source_node)
      else:
        raise errors.OpExecError("Could not shutdown instance %s on node %s" %
                                 (instance.name, source_node))

    feedback_fn("* deactivating the instance's disks on source node")
    if not _ShutdownInstanceDisks(self, instance, ignore_primary=True):
      raise errors.OpExecError("Can't shut down the instance's disks.")

    instance.primary_node = target_node
    # distribute new instance config to the other nodes
    self.cfg.Update(instance)

    # Only start the instance if it's marked as up
    if instance.status == "up":
      feedback_fn("* activating the instance's disks on target node")
      logging.info("Starting instance %s on node %s",
                   instance.name, target_node)

      disks_ok, dummy = _AssembleInstanceDisks(self, instance,
                                               ignore_secondaries=True)
      if not disks_ok:
        _ShutdownInstanceDisks(self, instance)
        raise errors.OpExecError("Can't activate the instance's disks")

      feedback_fn("* starting the instance on the target node")
      if not self.rpc.call_instance_start(target_node, instance, None):
        _ShutdownInstanceDisks(self, instance)
        raise errors.OpExecError("Could not start instance %s on node %s." %
                                 (instance.name, target_node))
3046 a8083063 Iustin Pop
3047 a8083063 Iustin Pop
3048 b9bddb6b Iustin Pop
def _CreateBlockDevOnPrimary(lu, node, instance, device, info):
3049 a8083063 Iustin Pop
  """Create a tree of block devices on the primary node.
3050 a8083063 Iustin Pop

3051 a8083063 Iustin Pop
  This always creates all devices.
3052 a8083063 Iustin Pop

3053 a8083063 Iustin Pop
  """
3054 a8083063 Iustin Pop
  if device.children:
3055 a8083063 Iustin Pop
    for child in device.children:
3056 b9bddb6b Iustin Pop
      if not _CreateBlockDevOnPrimary(lu, node, instance, child, info):
3057 a8083063 Iustin Pop
        return False
3058 a8083063 Iustin Pop
3059 b9bddb6b Iustin Pop
  lu.cfg.SetDiskID(device, node)
3060 72737a7f Iustin Pop
  new_id = lu.rpc.call_blockdev_create(node, device, device.size,
3061 72737a7f Iustin Pop
                                       instance.name, True, info)
3062 a8083063 Iustin Pop
  if not new_id:
3063 a8083063 Iustin Pop
    return False
3064 a8083063 Iustin Pop
  if device.physical_id is None:
3065 a8083063 Iustin Pop
    device.physical_id = new_id
3066 a8083063 Iustin Pop
  return True
3067 a8083063 Iustin Pop
3068 a8083063 Iustin Pop
3069 b9bddb6b Iustin Pop
def _CreateBlockDevOnSecondary(lu, node, instance, device, force, info):
3070 a8083063 Iustin Pop
  """Create a tree of block devices on a secondary node.
3071 a8083063 Iustin Pop

3072 a8083063 Iustin Pop
  If this device type has to be created on secondaries, create it and
3073 a8083063 Iustin Pop
  all its children.
3074 a8083063 Iustin Pop

3075 a8083063 Iustin Pop
  If not, just recurse to children keeping the same 'force' value.
3076 a8083063 Iustin Pop

3077 a8083063 Iustin Pop
  """
3078 a8083063 Iustin Pop
  if device.CreateOnSecondary():
3079 a8083063 Iustin Pop
    force = True
3080 a8083063 Iustin Pop
  if device.children:
3081 a8083063 Iustin Pop
    for child in device.children:
3082 b9bddb6b Iustin Pop
      if not _CreateBlockDevOnSecondary(lu, node, instance,
3083 3f78eef2 Iustin Pop
                                        child, force, info):
3084 a8083063 Iustin Pop
        return False
3085 a8083063 Iustin Pop
3086 a8083063 Iustin Pop
  if not force:
3087 a8083063 Iustin Pop
    return True
3088 b9bddb6b Iustin Pop
  lu.cfg.SetDiskID(device, node)
3089 72737a7f Iustin Pop
  new_id = lu.rpc.call_blockdev_create(node, device, device.size,
3090 72737a7f Iustin Pop
                                       instance.name, False, info)
3091 a8083063 Iustin Pop
  if not new_id:
3092 a8083063 Iustin Pop
    return False
3093 a8083063 Iustin Pop
  if device.physical_id is None:
3094 a8083063 Iustin Pop
    device.physical_id = new_id
3095 a8083063 Iustin Pop
  return True
3096 a8083063 Iustin Pop
3097 a8083063 Iustin Pop
3098 b9bddb6b Iustin Pop
def _GenerateUniqueNames(lu, exts):
3099 923b1523 Iustin Pop
  """Generate a suitable LV name.
3100 923b1523 Iustin Pop

3101 923b1523 Iustin Pop
  This will generate a logical volume name for the given instance.
3102 923b1523 Iustin Pop

3103 923b1523 Iustin Pop
  """
3104 923b1523 Iustin Pop
  results = []
3105 923b1523 Iustin Pop
  for val in exts:
3106 b9bddb6b Iustin Pop
    new_id = lu.cfg.GenerateUniqueID()
3107 923b1523 Iustin Pop
    results.append("%s%s" % (new_id, val))
3108 923b1523 Iustin Pop
  return results
3109 923b1523 Iustin Pop
3110 923b1523 Iustin Pop
3111 b9bddb6b Iustin Pop
def _GenerateDRBD8Branch(lu, primary, secondary, size, names, iv_name,
                         p_minor, s_minor):
  """Generate a drbd8 device complete with its children.

  Builds the two backing logical volumes (data plus a fixed-size
  metadata volume) and wraps them in a DRBD8 disk object connecting
  the two given nodes.

  @param lu: the logical unit on whose behalf we execute
  @param primary: name of the primary node
  @param secondary: name of the secondary node
  @param size: size of the data device
  @param names: pair of LV names, (data, meta)
  @param iv_name: the instance-visible name of the device
  @param p_minor: DRBD minor to use on the primary node
  @param s_minor: DRBD minor to use on the secondary node
  @return: the new DRBD8 disk object

  """
  vgname = lu.cfg.GetVGName()
  port = lu.cfg.AllocatePort()
  shared_secret = lu.cfg.GenerateDRBDSecret()
  data_lv = objects.Disk(dev_type=constants.LD_LV, size=size,
                         logical_id=(vgname, names[0]))
  meta_lv = objects.Disk(dev_type=constants.LD_LV, size=128,
                         logical_id=(vgname, names[1]))
  return objects.Disk(dev_type=constants.LD_DRBD8, size=size,
                      logical_id=(primary, secondary, port,
                                  p_minor, s_minor,
                                  shared_secret),
                      children=[data_lv, meta_lv],
                      iv_name=iv_name)
3130 a1f445d3 Iustin Pop
3131 7c0d6283 Michael Hanselmann
3132 b9bddb6b Iustin Pop
def _GenerateDiskTemplate(lu, template_name,
                          instance_name, primary_node,
                          secondary_nodes, disk_sz, swap_sz,
                          file_storage_dir, file_driver):
  """Generate the entire disk layout for a given template type.

  Dispatches on the template name and returns the list of disk
  objects (sda/sdb pair, or empty for diskless instances).

  @param lu: the logical unit on whose behalf we execute
  @param template_name: one of the constants.DT_* disk templates
  @param instance_name: name of the owning instance
  @param primary_node: the instance's primary node
  @param secondary_nodes: list of secondary nodes (must match the
      template's requirements)
  @param disk_sz: size of the data disk
  @param swap_sz: size of the swap disk
  @param file_storage_dir: directory for file-based disks
  @param file_driver: driver for file-based disks
  @raise errors.ProgrammerError: if the secondary node count does not
      match the template, or the template is unknown

  """
  #TODO: compute space requirements

  vgname = lu.cfg.GetVGName()

  if template_name == constants.DT_DISKLESS:
    return []

  if template_name == constants.DT_PLAIN:
    if secondary_nodes:
      raise errors.ProgrammerError("Wrong template configuration")
    lv_names = _GenerateUniqueNames(lu, [".sda", ".sdb"])
    return [objects.Disk(dev_type=constants.LD_LV, size=disk_sz,
                         logical_id=(vgname, lv_names[0]),
                         iv_name="sda"),
            objects.Disk(dev_type=constants.LD_LV, size=swap_sz,
                         logical_id=(vgname, lv_names[1]),
                         iv_name="sdb")]

  if template_name == constants.DT_DRBD8:
    if len(secondary_nodes) != 1:
      raise errors.ProgrammerError("Wrong template configuration")
    remote_node = secondary_nodes[0]
    # four minors: data and meta device on each of the two nodes
    (minor_pa, minor_pb,
     minor_sa, minor_sb) = lu.cfg.AllocateDRBDMinor(
      [primary_node, primary_node, remote_node, remote_node], instance_name)
    lv_names = _GenerateUniqueNames(lu, [".sda_data", ".sda_meta",
                                         ".sdb_data", ".sdb_meta"])
    sda = _GenerateDRBD8Branch(lu, primary_node, remote_node,
                               disk_sz, lv_names[0:2], "sda",
                               minor_pa, minor_sa)
    sdb = _GenerateDRBD8Branch(lu, primary_node, remote_node,
                               swap_sz, lv_names[2:4], "sdb",
                               minor_pb, minor_sb)
    return [sda, sdb]

  if template_name == constants.DT_FILE:
    if secondary_nodes:
      raise errors.ProgrammerError("Wrong template configuration")
    return [objects.Disk(dev_type=constants.LD_FILE, size=disk_sz,
                         iv_name="sda",
                         logical_id=(file_driver,
                                     "%s/sda" % file_storage_dir)),
            objects.Disk(dev_type=constants.LD_FILE, size=swap_sz,
                         iv_name="sdb",
                         logical_id=(file_driver,
                                     "%s/sdb" % file_storage_dir))]

  raise errors.ProgrammerError("Invalid disk template '%s'" % template_name)
3187 a8083063 Iustin Pop
3188 a8083063 Iustin Pop
3189 a0c3fea1 Michael Hanselmann
def _GetInstanceInfoText(instance):
3190 3ecf6786 Iustin Pop
  """Compute that text that should be added to the disk's metadata.
3191 3ecf6786 Iustin Pop

3192 3ecf6786 Iustin Pop
  """
3193 a0c3fea1 Michael Hanselmann
  return "originstname+%s" % instance.name
3194 a0c3fea1 Michael Hanselmann
3195 a0c3fea1 Michael Hanselmann
3196 b9bddb6b Iustin Pop
def _CreateDisks(lu, instance):
  """Create all disks for an instance.

  This abstracts away some work from AddInstance.  For file-based
  instances the storage directory is created first; each disk is then
  created on all secondary nodes and finally on the primary node.
  Processing stops at the first failure.

  @param lu: the logical unit on whose behalf we execute
  @param instance: the instance object
  @return: True or False showing the success of the creation process

  """
  info = _GetInstanceInfoText(instance)
  pnode = instance.primary_node

  if instance.disk_template == constants.DT_FILE:
    storage_dir = os.path.dirname(instance.disks[0].logical_id[1])
    result = lu.rpc.call_file_storage_dir_create(pnode, storage_dir)

    if not result:
      logging.error("Could not connect to node '%s'", pnode)
      return False

    if not result[0]:
      logging.error("Failed to create directory '%s'", storage_dir)
      return False

  for disk in instance.disks:
    logging.info("Creating volume %s for instance %s",
                 disk.iv_name, instance.name)
    #HARDCODE
    for snode in instance.secondary_nodes:
      if not _CreateBlockDevOnSecondary(lu, snode, instance,
                                        disk, False, info):
        logging.error("Failed to create volume %s (%s) on secondary node %s!",
                      disk.iv_name, disk, snode)
        return False
    #HARDCODE
    if not _CreateBlockDevOnPrimary(lu, pnode, instance, disk, info):
      logging.error("Failed to create volume %s on primary!", disk.iv_name)
      return False

  return True
3240 a8083063 Iustin Pop
3241 a8083063 Iustin Pop
3242 b9bddb6b Iustin Pop
def _RemoveDisks(lu, instance):
  """Remove all disks for an instance.

  This abstracts away some work from `AddInstance()` and
  `RemoveInstance()`. Note that in case some of the devices couldn't
  be removed, the removal will continue with the other ones (compare
  with `_CreateDisks()`).

  @type lu: L{LogicalUnit}
  @param lu: the logical unit on whose behalf we execute
  @type instance: L{objects.Instance}
  @param instance: the instance whose disks we should remove
  @rtype: boolean
  @return: True if all disks were successfully removed, False otherwise

  """
  logging.info("Removing block devices for instance %s", instance.name)

  result = True
  for device in instance.disks:
    for node, disk in device.ComputeNodeTree(instance.primary_node):
      lu.cfg.SetDiskID(disk, node)
      if not lu.rpc.call_blockdev_remove(node, disk):
        # best-effort removal: warn and keep going so the remaining
        # devices still get cleaned up
        lu.proc.LogWarning("Could not remove block device %s on node %s,"
                           " continuing anyway", device.iv_name, node)
        result = False

  # file-based instances additionally have a backing directory to remove
  if instance.disk_template == constants.DT_FILE:
    file_storage_dir = os.path.dirname(instance.disks[0].logical_id[1])
    if not lu.rpc.call_file_storage_dir_remove(instance.primary_node,
                                               file_storage_dir):
      logging.error("Could not remove directory '%s'", file_storage_dir)
      result = False

  return result
3276 a8083063 Iustin Pop
3277 a8083063 Iustin Pop
3278 e2fe6369 Iustin Pop
def _ComputeDiskSize(disk_template, disk_size, swap_size):
  """Compute disk size requirements in the volume group

  This is currently hard-coded for the two-drive layout.

  @param disk_template: one of the C{constants.DT_*} disk templates
  @param disk_size: size of the data disk, in mebibytes
  @param swap_size: size of the swap disk, in mebibytes
  @return: the required free space in the volume group, or None for
      templates which don't consume volume group space
  @raise errors.ProgrammerError: for an unknown disk template

  """
  base = disk_size + swap_size
  # required free disk space as a function of disk and swap space;
  # None means the template does not use the volume group at all
  requirements = {
    constants.DT_DISKLESS: None,
    constants.DT_PLAIN: base,
    # 256 MB are added for drbd metadata, 128MB for each drbd device
    constants.DT_DRBD8: base + 256,
    constants.DT_FILE: None,
  }

  try:
    return requirements[disk_template]
  except KeyError:
    raise errors.ProgrammerError("Disk template '%s' size requirement"
                                 " is unknown" % disk_template)
3298 e2fe6369 Iustin Pop
3299 e2fe6369 Iustin Pop
3300 74409b12 Iustin Pop
def _CheckHVParams(lu, nodenames, hvname, hvparams):
  """Hypervisor parameter validation.

  This function abstract the hypervisor parameter validation to be
  used in both instance create and instance modify.

  @type lu: L{LogicalUnit}
  @param lu: the logical unit for which we check
  @type nodenames: list
  @param nodenames: the list of nodes on which we should check
  @type hvname: string
  @param hvname: the name of the hypervisor we should use
  @type hvparams: dict
  @param hvparams: the parameters which we need to check
  @raise errors.OpPrereqError: if the parameters are not valid

  """
  answers = lu.rpc.call_hypervisor_validate_params(nodenames, hvname,
                                                   hvparams)
  for node in nodenames:
    node_answer = answers.get(node, None)
    # each node is expected to answer with a (status, message) sequence
    if not (node_answer and isinstance(node_answer, (tuple, list))):
      raise errors.OpPrereqError("Cannot get current information"
                                 " from node '%s' (%s)" % (node, node_answer))
    if not node_answer[0]:
      raise errors.OpPrereqError("Hypervisor parameter validation failed:"
                                 " %s" % node_answer[1])
3328 74409b12 Iustin Pop
3329 74409b12 Iustin Pop
3330 a8083063 Iustin Pop
class LUCreateInstance(LogicalUnit):
3331 a8083063 Iustin Pop
  """Create an instance.
3332 a8083063 Iustin Pop

3333 a8083063 Iustin Pop
  """
3334 a8083063 Iustin Pop
  # NOTE(review): presumably the hooks directory/type used by the hooks
  # runner for this LU -- confirm against the LogicalUnit base class
  HPATH = "instance-add"
3335 a8083063 Iustin Pop
  HTYPE = constants.HTYPE_INSTANCE
3336 338e51e8 Iustin Pop
  # opcode attributes this LU requires (checked by the LU framework,
  # presumably at CheckArguments time -- confirm in LogicalUnit)
  _OP_REQP = ["instance_name", "disk_size",
3337 338e51e8 Iustin Pop
              "disk_template", "swap_size", "mode", "start",
3338 338e51e8 Iustin Pop
              "wait_for_sync", "ip_check", "mac",
3339 338e51e8 Iustin Pop
              "hvparams", "beparams"]
3340 7baf741d Guido Trotter
  # locking is computed per-LU in ExpandNames below, not via the BGL
  REQ_BGL = False
3341 7baf741d Guido Trotter
3342 7baf741d Guido Trotter
  def _ExpandNode(self, node):
    """Expands and checks one node name.

    @param node: the (possibly abbreviated) node name to expand
    @return: the full node name
    @raise errors.OpPrereqError: if the node is not known

    """
    expanded_name = self.cfg.ExpandNodeName(node)
    if expanded_name is not None:
      return expanded_name
    raise errors.OpPrereqError("Unknown node %s" % node)
3350 7baf741d Guido Trotter
3351 7baf741d Guido Trotter
  def ExpandNames(self):
    """ExpandNames for CreateInstance.

    Figure out the right locks for instance creation.

    """
    self.needed_locks = {}

    # normalize optional opcode parameters to None when they are absent
    for attr in ["pnode", "snode", "iallocator", "hypervisor"]:
      if not hasattr(self.op, attr):
        setattr(self.op, attr, None)

    # cheap checks first, mostly on constant validity

    # creation mode must be one of the two supported values
    if self.op.mode not in (constants.INSTANCE_CREATE,
                            constants.INSTANCE_IMPORT):
      raise errors.OpPrereqError("Invalid instance creation mode '%s'" %
                                 self.op.mode)

    # disk template verification
    if self.op.disk_template not in constants.DISK_TEMPLATES:
      raise errors.OpPrereqError("Invalid disk template name")

    # default to the cluster hypervisor when none was requested
    if self.op.hypervisor is None:
      self.op.hypervisor = self.cfg.GetHypervisorType()

    cluster = self.cfg.GetClusterInfo()
    enabled_hvs = cluster.enabled_hypervisors
    if self.op.hypervisor not in enabled_hvs:
      raise errors.OpPrereqError("Selected hypervisor (%s) not enabled in the"
                                 " cluster (%s)" % (self.op.hypervisor,
                                  ",".join(enabled_hvs)))

    # check hypervisor parameter syntax (locally)
    filled_hvp = cluster.FillDict(cluster.hvparams[self.op.hypervisor],
                                  self.op.hvparams)
    hv_type = hypervisor.GetHypervisor(self.op.hypervisor)
    hv_type.CheckParameterSyntax(filled_hvp)

    # fill and remember the beparams dict for use in later phases
    self.be_full = cluster.FillDict(cluster.beparams[constants.BEGR_DEFAULT],
                                    self.op.beparams)

    #### instance parameters check

    # instance name verification
    hostname = utils.HostInfo(self.op.instance_name)
    self.op.instance_name = instance_name = hostname.name

    # this is just a preventive check, but someone might still add this
    # instance in the meantime, and creation will fail at lock-add time
    if instance_name in self.cfg.GetInstanceList():
      raise errors.OpPrereqError("Instance '%s' is already in the cluster" %
                                 instance_name)

    self.add_locks[locking.LEVEL_INSTANCE] = instance_name

    # ip validity checks
    ip = getattr(self.op, "ip", None)
    if ip is None or ip.lower() == "none":
      inst_ip = None
    elif ip.lower() == constants.VALUE_AUTO:
      # automatic: use the address the instance name resolved to
      inst_ip = hostname.ip
    else:
      if not utils.IsValidIP(ip):
        raise errors.OpPrereqError("given IP address '%s' doesn't look"
                                   " like a valid IP" % ip)
      inst_ip = ip
    self.inst_ip = self.op.ip = inst_ip
    # used in CheckPrereq for ip ping check
    self.check_ip = hostname.ip

    # MAC address verification
    if self.op.mac not in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
      if not utils.IsValidMac(self.op.mac.lower()):
        raise errors.OpPrereqError("invalid MAC address specified: %s" %
                                   self.op.mac)

    # file storage checks
    if (self.op.file_driver and
        self.op.file_driver not in constants.FILE_DRIVER):
      raise errors.OpPrereqError("Invalid file driver name '%s'" %
                                 self.op.file_driver)

    if self.op.file_storage_dir and os.path.isabs(self.op.file_storage_dir):
      raise errors.OpPrereqError("File storage directory path not absolute")

    ### Node/iallocator related checks
    if [self.op.iallocator, self.op.pnode].count(None) != 1:
      raise errors.OpPrereqError("One and only one of iallocator and primary"
                                 " node must be given")

    if self.op.iallocator:
      # the allocator will choose the nodes, so we must be able to lock any
      self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
    else:
      self.op.pnode = self._ExpandNode(self.op.pnode)
      node_list = [self.op.pnode]
      if self.op.snode is not None:
        self.op.snode = self._ExpandNode(self.op.snode)
        node_list.append(self.op.snode)
      self.needed_locks[locking.LEVEL_NODE] = node_list

    # in case of import, lock the source node too
    if self.op.mode == constants.INSTANCE_IMPORT:
      src_node = getattr(self.op, "src_node", None)
      src_path = getattr(self.op, "src_path", None)

      if src_node is None or src_path is None:
        raise errors.OpPrereqError("Importing an instance requires source"
                                   " node and path options")

      if not os.path.isabs(src_path):
        raise errors.OpPrereqError("The source path must be absolute")

      src_node = self._ExpandNode(src_node)
      self.op.src_node = src_node
      if self.needed_locks[locking.LEVEL_NODE] is not locking.ALL_SET:
        self.needed_locks[locking.LEVEL_NODE].append(src_node)

    else: # INSTANCE_CREATE
      if getattr(self.op, "os_type", None) is None:
        raise errors.OpPrereqError("No guest OS specified")
3475 a8083063 Iustin Pop
3476 538475ca Iustin Pop
  def _RunAllocator(self):
    """Run the allocator based on input opcode.

    On success, this sets self.op.pnode (and self.op.snode when the
    allocator was asked for two nodes) from the allocator's answer.

    @raise errors.OpPrereqError: if the allocator fails or returns an
        unexpected number of nodes

    """
    disk_specs = [{"size": self.op.disk_size, "mode": "w"},
                  {"size": self.op.swap_size, "mode": "w"}]
    nic_specs = [{"mac": self.op.mac, "ip": getattr(self.op, "ip", None),
                  "bridge": self.op.bridge}]
    ial = IAllocator(self,
                     mode=constants.IALLOCATOR_MODE_ALLOC,
                     name=self.op.instance_name,
                     disk_template=self.op.disk_template,
                     tags=[],
                     os=self.op.os_type,
                     vcpus=self.be_full[constants.BE_VCPUS],
                     mem_size=self.be_full[constants.BE_MEMORY],
                     disks=disk_specs,
                     nics=nic_specs)

    ial.Run(self.op.iallocator)

    if not ial.success:
      raise errors.OpPrereqError("Can't compute nodes using"
                                 " iallocator '%s': %s" % (self.op.iallocator,
                                                           ial.info))
    if len(ial.nodes) != ial.required_nodes:
      raise errors.OpPrereqError("iallocator '%s' returned invalid number"
                                 " of nodes (%s), required %s" %
                                 (self.op.iallocator, len(ial.nodes),
                                  ial.required_nodes))
    self.op.pnode = ial.nodes[0]
    self.LogInfo("Selected nodes for instance %s via iallocator %s: %s",
                 self.op.instance_name, self.op.iallocator,
                 ", ".join(ial.nodes))
    if ial.required_nodes == 2:
      self.op.snode = ial.nodes[1]
3513 538475ca Iustin Pop
3514 a8083063 Iustin Pop
  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and secondary nodes of the instance.

    """
    env = {
      "INSTANCE_DISK_TEMPLATE": self.op.disk_template,
      "INSTANCE_DISK_SIZE": self.op.disk_size,
      "INSTANCE_SWAP_SIZE": self.op.swap_size,
      "INSTANCE_ADD_MODE": self.op.mode,
      }
    # for imports, also expose where the data is coming from
    if self.op.mode == constants.INSTANCE_IMPORT:
      env["INSTANCE_SRC_NODE"] = self.op.src_node
      env["INSTANCE_SRC_PATH"] = self.op.src_path
      env["INSTANCE_SRC_IMAGES"] = self.src_images

    env.update(_BuildInstanceHookEnv(
      name=self.op.instance_name,
      primary_node=self.op.pnode,
      secondary_nodes=self.secondaries,
      status=self.instance_status,
      os_type=self.op.os_type,
      memory=self.be_full[constants.BE_MEMORY],
      vcpus=self.be_full[constants.BE_VCPUS],
      nics=[(self.inst_ip, self.op.bridge, self.op.mac)],
      ))

    # the hooks run on the master, the primary and any secondaries
    node_list = ([self.cfg.GetMasterNode(), self.op.pnode] +
                 self.secondaries)
    return env, node_list, node_list
3544 a8083063 Iustin Pop
3545 a8083063 Iustin Pop
3546 a8083063 Iustin Pop
  def CheckPrereq(self):
    """Check prerequisites.

    Validates the import source (if any), IP conflicts, node choices,
    free disk and memory, hypervisor parameters, OS and bridge
    availability before the instance is actually created.

    """
    if (not self.cfg.GetVGName() and
        self.op.disk_template not in constants.DTS_NOT_LVM):
      raise errors.OpPrereqError("Cluster does not support lvm-based"
                                 " instances")

    if self.op.mode == constants.INSTANCE_IMPORT:
      src_node = self.op.src_node
      src_path = self.op.src_path

      export_info = self.rpc.call_export_info(src_node, src_path)
      if not export_info:
        raise errors.OpPrereqError("No export found in dir %s" % src_path)
      if not export_info.has_section(constants.INISECT_EXP):
        raise errors.ProgrammerError("Corrupted export config")

      ei_version = export_info.get(constants.INISECT_EXP, 'version')
      if int(ei_version) != constants.EXPORT_VERSION:
        raise errors.OpPrereqError("Wrong export version %s (wanted %d)" %
                                   (ei_version, constants.EXPORT_VERSION))

      # Check that the new instance doesn't have less disks than the export
      # TODO: substitute "2" with the actual number of disks requested
      instance_disks = 2
      export_disks = export_info.getint(constants.INISECT_INS, 'disk_count')
      if instance_disks < export_disks:
        raise errors.OpPrereqError("Not enough disks to import."
                                   " (instance: %d, export: %d)" %
                                   (instance_disks, export_disks))

      self.op.os_type = export_info.get(constants.INISECT_EXP, 'os')
      disk_images = []
      for idx in range(export_disks):
        option = 'disk%d_dump' % idx
        if export_info.has_option(constants.INISECT_INS, option):
          # FIXME: are the old os-es, disk sizes, etc. useful?
          export_name = export_info.get(constants.INISECT_INS, option)
          disk_images.append(os.path.join(src_path, export_name))
        else:
          # no dump present in the export for this disk index
          disk_images.append(False)
      self.src_images = disk_images

      # reuse the exported MAC when re-importing under the same name
      if self.op.mac == constants.VALUE_AUTO:
        old_name = export_info.get(constants.INISECT_INS, 'name')
        if self.op.instance_name == old_name:
          # FIXME: adjust every nic, when we'll be able to create instances
          # with more than one
          if int(export_info.get(constants.INISECT_INS, 'nic_count')) >= 1:
            self.op.mac = export_info.get(constants.INISECT_INS, 'nic_0_mac')

    # ip ping checks (we use the same ip that was resolved in ExpandNames)
    if self.op.start and not self.op.ip_check:
      raise errors.OpPrereqError("Cannot ignore IP address conflicts when"
                                 " adding an instance in start mode")

    if self.op.ip_check:
      if utils.TcpPing(self.check_ip, constants.DEFAULT_NODED_PORT):
        raise errors.OpPrereqError("IP %s of instance %s already in use" %
                                   (self.check_ip, self.op.instance_name))

    # bridge verification: default to the cluster-wide bridge
    bridge = getattr(self.op, "bridge", None)
    if bridge is None:
      self.op.bridge = self.cfg.GetDefBridge()
    else:
      self.op.bridge = bridge

    #### allocator run
    if self.op.iallocator is not None:
      self._RunAllocator()

    #### node related checks

    # check primary node
    self.pnode = pnode = self.cfg.GetNodeInfo(self.op.pnode)
    assert self.pnode is not None, \
      "Cannot retrieve locked node %s" % self.op.pnode
    self.secondaries = []

    # mirror node verification
    if self.op.disk_template in constants.DTS_NET_MIRROR:
      if self.op.snode is None:
        raise errors.OpPrereqError("The networked disk templates need"
                                   " a mirror node")
      if self.op.snode == pnode.name:
        raise errors.OpPrereqError("The secondary node cannot be"
                                   " the primary node.")
      self.secondaries.append(self.op.snode)

    nodenames = [pnode.name] + self.secondaries

    req_size = _ComputeDiskSize(self.op.disk_template,
                                self.op.disk_size, self.op.swap_size)

    # Check lv size requirements on every involved node
    if req_size is not None:
      nodeinfo = self.rpc.call_node_info(nodenames, self.cfg.GetVGName(),
                                         self.op.hypervisor)
      for node in nodenames:
        info = nodeinfo.get(node, None)
        if not info:
          raise errors.OpPrereqError("Cannot get current information"
                                     " from node '%s'" % node)
        vg_free = info.get('vg_free', None)
        if not isinstance(vg_free, int):
          raise errors.OpPrereqError("Can't compute free disk space on"
                                     " node %s" % node)
        if req_size > info['vg_free']:
          raise errors.OpPrereqError("Not enough disk space on target node %s."
                                     " %d MB available, %d MB required" %
                                     (node, info['vg_free'], req_size))

    _CheckHVParams(self, nodenames, self.op.hypervisor, self.op.hvparams)

    # os verification
    os_obj = self.rpc.call_os_get(pnode.name, self.op.os_type)
    if not os_obj:
      raise errors.OpPrereqError("OS '%s' not in supported os list for"
                                 " primary node" % self.op.os_type)

    # bridge check on primary node
    if not self.rpc.call_bridges_exist(self.pnode.name, [self.op.bridge]):
      raise errors.OpPrereqError("target bridge '%s' does not exist on"
                                 " destination node '%s'" %
                                 (self.op.bridge, pnode.name))

    # memory check on primary node
    if self.op.start:
      _CheckNodeFreeMemory(self, self.pnode.name,
                           "creating instance %s" % self.op.instance_name,
                           self.be_full[constants.BE_MEMORY],
                           self.op.hypervisor)

    if self.op.start:
      self.instance_status = 'up'
    else:
      self.instance_status = 'down'
3693 a8083063 Iustin Pop
3694 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
3695 a8083063 Iustin Pop
    """Create and add the instance to the cluster.
3696 a8083063 Iustin Pop

3697 a8083063 Iustin Pop
    """
3698 a8083063 Iustin Pop
    instance = self.op.instance_name
3699 a8083063 Iustin Pop
    pnode_name = self.pnode.name
3700 a8083063 Iustin Pop
3701 c78995f0 Guido Trotter
    if self.op.mac in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
3702 ba4b62cf Iustin Pop
      mac_address = self.cfg.GenerateMAC()
3703 1862d460 Alexander Schreiber
    else:
3704 ba4b62cf Iustin Pop
      mac_address = self.op.mac
3705 1862d460 Alexander Schreiber
3706 1862d460 Alexander Schreiber
    nic = objects.NIC(bridge=self.op.bridge, mac=mac_address)
3707 a8083063 Iustin Pop
    if self.inst_ip is not None:
3708 a8083063 Iustin Pop
      nic.ip = self.inst_ip
3709 a8083063 Iustin Pop
3710 e69d05fd Iustin Pop
    ht_kind = self.op.hypervisor
3711 2a6469d5 Alexander Schreiber
    if ht_kind in constants.HTS_REQ_PORT:
3712 2a6469d5 Alexander Schreiber
      network_port = self.cfg.AllocatePort()
3713 2a6469d5 Alexander Schreiber
    else:
3714 2a6469d5 Alexander Schreiber
      network_port = None
3715 58acb49d Alexander Schreiber
3716 6785674e Iustin Pop
    ##if self.op.vnc_bind_address is None:
3717 6785674e Iustin Pop
    ##  self.op.vnc_bind_address = constants.VNC_DEFAULT_BIND_ADDRESS
3718 31a853d2 Iustin Pop
3719 2c313123 Manuel Franceschini
    # this is needed because os.path.join does not accept None arguments
3720 2c313123 Manuel Franceschini
    if self.op.file_storage_dir is None:
3721 2c313123 Manuel Franceschini
      string_file_storage_dir = ""
3722 2c313123 Manuel Franceschini
    else:
3723 2c313123 Manuel Franceschini
      string_file_storage_dir = self.op.file_storage_dir
3724 2c313123 Manuel Franceschini
3725 0f1a06e3 Manuel Franceschini
    # build the full file storage dir path
3726 0f1a06e3 Manuel Franceschini
    file_storage_dir = os.path.normpath(os.path.join(
3727 d6a02168 Michael Hanselmann
                                        self.cfg.GetFileStorageDir(),
3728 2c313123 Manuel Franceschini
                                        string_file_storage_dir, instance))
3729 0f1a06e3 Manuel Franceschini
3730 0f1a06e3 Manuel Franceschini
3731 b9bddb6b Iustin Pop
    disks = _GenerateDiskTemplate(self,
3732 a8083063 Iustin Pop
                                  self.op.disk_template,
3733 a8083063 Iustin Pop
                                  instance, pnode_name,
3734 a8083063 Iustin Pop
                                  self.secondaries, self.op.disk_size,
3735 0f1a06e3 Manuel Franceschini
                                  self.op.swap_size,
3736 0f1a06e3 Manuel Franceschini
                                  file_storage_dir,
3737 0f1a06e3 Manuel Franceschini
                                  self.op.file_driver)
3738 a8083063 Iustin Pop
3739 a8083063 Iustin Pop
    iobj = objects.Instance(name=instance, os=self.op.os_type,
3740 a8083063 Iustin Pop
                            primary_node=pnode_name,
3741 a8083063 Iustin Pop
                            nics=[nic], disks=disks,
3742 a8083063 Iustin Pop
                            disk_template=self.op.disk_template,
3743 a8083063 Iustin Pop
                            status=self.instance_status,
3744 58acb49d Alexander Schreiber
                            network_port=network_port,
3745 338e51e8 Iustin Pop
                            beparams=self.op.beparams,
3746 6785674e Iustin Pop
                            hvparams=self.op.hvparams,
3747 e69d05fd Iustin Pop
                            hypervisor=self.op.hypervisor,
3748 a8083063 Iustin Pop
                            )
3749 a8083063 Iustin Pop
3750 a8083063 Iustin Pop
    feedback_fn("* creating instance disks...")
3751 b9bddb6b Iustin Pop
    if not _CreateDisks(self, iobj):
3752 b9bddb6b Iustin Pop
      _RemoveDisks(self, iobj)
3753 a1578d63 Iustin Pop
      self.cfg.ReleaseDRBDMinors(instance)
3754 3ecf6786 Iustin Pop
      raise errors.OpExecError("Device creation failed, reverting...")
3755 a8083063 Iustin Pop
3756 a8083063 Iustin Pop
    feedback_fn("adding instance %s to cluster config" % instance)
3757 a8083063 Iustin Pop
3758 a8083063 Iustin Pop
    self.cfg.AddInstance(iobj)
3759 7baf741d Guido Trotter
    # Declare that we don't want to remove the instance lock anymore, as we've
3760 7baf741d Guido Trotter
    # added the instance to the config
3761 7baf741d Guido Trotter
    del self.remove_locks[locking.LEVEL_INSTANCE]
3762 a1578d63 Iustin Pop
    # Remove the temp. assignements for the instance's drbds
3763 a1578d63 Iustin Pop
    self.cfg.ReleaseDRBDMinors(instance)
3764 a8083063 Iustin Pop
3765 a8083063 Iustin Pop
    if self.op.wait_for_sync:
3766 b9bddb6b Iustin Pop
      disk_abort = not _WaitForSync(self, iobj)
3767 a1f445d3 Iustin Pop
    elif iobj.disk_template in constants.DTS_NET_MIRROR:
3768 a8083063 Iustin Pop
      # make sure the disks are not degraded (still sync-ing is ok)
3769 a8083063 Iustin Pop
      time.sleep(15)
3770 a8083063 Iustin Pop
      feedback_fn("* checking mirrors status")
3771 b9bddb6b Iustin Pop
      disk_abort = not _WaitForSync(self, iobj, oneshot=True)
3772 a8083063 Iustin Pop
    else:
3773 a8083063 Iustin Pop
      disk_abort = False
3774 a8083063 Iustin Pop
3775 a8083063 Iustin Pop
    if disk_abort:
3776 b9bddb6b Iustin Pop
      _RemoveDisks(self, iobj)
3777 a8083063 Iustin Pop
      self.cfg.RemoveInstance(iobj.name)
3778 7baf741d Guido Trotter
      # Make sure the instance lock gets removed
3779 7baf741d Guido Trotter
      self.remove_locks[locking.LEVEL_INSTANCE] = iobj.name
3780 3ecf6786 Iustin Pop
      raise errors.OpExecError("There are some degraded disks for"
3781 3ecf6786 Iustin Pop
                               " this instance")
3782 a8083063 Iustin Pop
3783 a8083063 Iustin Pop
    feedback_fn("creating os for instance %s on node %s" %
3784 a8083063 Iustin Pop
                (instance, pnode_name))
3785 a8083063 Iustin Pop
3786 a8083063 Iustin Pop
    if iobj.disk_template != constants.DT_DISKLESS:
3787 a8083063 Iustin Pop
      if self.op.mode == constants.INSTANCE_CREATE:
3788 a8083063 Iustin Pop
        feedback_fn("* running the instance OS create scripts...")
3789 d15a9ad3 Guido Trotter
        if not self.rpc.call_instance_os_add(pnode_name, iobj):
3790 3ecf6786 Iustin Pop
          raise errors.OpExecError("could not add os for instance %s"
3791 3ecf6786 Iustin Pop
                                   " on node %s" %
3792 3ecf6786 Iustin Pop
                                   (instance, pnode_name))
3793 a8083063 Iustin Pop
3794 a8083063 Iustin Pop
      elif self.op.mode == constants.INSTANCE_IMPORT:
3795 a8083063 Iustin Pop
        feedback_fn("* running the instance OS import scripts...")
3796 a8083063 Iustin Pop
        src_node = self.op.src_node
3797 09acf207 Guido Trotter
        src_images = self.src_images
3798 62c9ec92 Iustin Pop
        cluster_name = self.cfg.GetClusterName()
3799 6c0af70e Guido Trotter
        import_result = self.rpc.call_instance_os_import(pnode_name, iobj,
3800 09acf207 Guido Trotter
                                                         src_node, src_images,
3801 6c0af70e Guido Trotter
                                                         cluster_name)
3802 09acf207 Guido Trotter
        for idx, result in enumerate(import_result):
3803 09acf207 Guido Trotter
          if not result:
3804 09acf207 Guido Trotter
            self.LogWarning("Could not image %s for on instance %s, disk %d,"
3805 09acf207 Guido Trotter
                            " on node %s" % (src_images[idx], instance, idx,
3806 09acf207 Guido Trotter
                                             pnode_name))
3807 a8083063 Iustin Pop
      else:
3808 a8083063 Iustin Pop
        # also checked in the prereq part
3809 3ecf6786 Iustin Pop
        raise errors.ProgrammerError("Unknown OS initialization mode '%s'"
3810 3ecf6786 Iustin Pop
                                     % self.op.mode)
3811 a8083063 Iustin Pop
3812 a8083063 Iustin Pop
    if self.op.start:
3813 9a4f63d1 Iustin Pop
      logging.info("Starting instance %s on node %s", instance, pnode_name)
3814 a8083063 Iustin Pop
      feedback_fn("* starting instance...")
3815 72737a7f Iustin Pop
      if not self.rpc.call_instance_start(pnode_name, iobj, None):
3816 3ecf6786 Iustin Pop
        raise errors.OpExecError("Could not start instance")
3817 a8083063 Iustin Pop
3818 a8083063 Iustin Pop
3819 a8083063 Iustin Pop
class LUConnectConsole(NoHooksLU):
  """Connect to an instance's console.

  This is somewhat special in that it returns the command line that
  you need to run on the master node in order to connect to the
  console.

  """
  _OP_REQP = ["instance_name"]
  REQ_BGL = False

  def ExpandNames(self):
    """Expand and lock the target instance."""
    self._ExpandAndLockInstance()

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster.

    """
    self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
    assert self.instance is not None, \
      "Cannot retrieve locked instance %s" % self.op.instance_name

  def Exec(self, feedback_fn):
    """Connect to the console of an instance

    """
    inst = self.instance
    pnode = inst.primary_node

    # ask the primary node which instances it currently runs; a False
    # result means the RPC itself failed
    running = self.rpc.call_instance_list([pnode],
                                          [inst.hypervisor])[pnode]
    if running is False:
      raise errors.OpExecError("Can't connect to node %s." % pnode)

    if inst.name not in running:
      raise errors.OpExecError("Instance %s is not running." % inst.name)

    logging.debug("Connecting to console of %s on %s", inst.name, pnode)

    hyper = hypervisor.GetHypervisor(inst.hypervisor)
    console_cmd = hyper.GetShellCommandForConsole(inst)

    # build ssh cmdline
    return self.ssh.BuildCmd(pnode, "root", console_cmd, batch=True, tty=True)
3865 a8083063 Iustin Pop
3866 a8083063 Iustin Pop
3867 a8083063 Iustin Pop
class LUReplaceDisks(LogicalUnit):
3868 a8083063 Iustin Pop
  """Replace the disks of an instance.
3869 a8083063 Iustin Pop

3870 a8083063 Iustin Pop
  """
3871 a8083063 Iustin Pop
  HPATH = "mirrors-replace"
3872 a8083063 Iustin Pop
  HTYPE = constants.HTYPE_INSTANCE
3873 a9e0c397 Iustin Pop
  _OP_REQP = ["instance_name", "mode", "disks"]
3874 efd990e4 Guido Trotter
  REQ_BGL = False
3875 efd990e4 Guido Trotter
3876 efd990e4 Guido Trotter
  def ExpandNames(self):
    """Expand and lock the instance; decide which node locks we need.

    Three mutually exclusive cases: an iallocator will choose the new
    secondary (lock all nodes), the user named one (lock exactly it),
    or no new secondary is involved (lock the instance's own nodes,
    computed later in DeclareLocks).

    """
    self._ExpandAndLockInstance()

    if not hasattr(self.op, "remote_node"):
      self.op.remote_node = None

    use_iallocator = getattr(self.op, "iallocator", None) is not None
    if use_iallocator:
      if self.op.remote_node is not None:
        raise errors.OpPrereqError("Give either the iallocator or the new"
                                   " secondary, not both")
      # the allocator may pick any node, so we must lock them all
      self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
    elif self.op.remote_node is not None:
      full_node = self.cfg.ExpandNodeName(self.op.remote_node)
      if full_node is None:
        raise errors.OpPrereqError("Node '%s' not known" %
                                   self.op.remote_node)
      self.op.remote_node = full_node
      self.needed_locks[locking.LEVEL_NODE] = [full_node]
      self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_APPEND
    else:
      self.needed_locks[locking.LEVEL_NODE] = []
      self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
3899 efd990e4 Guido Trotter
3900 efd990e4 Guido Trotter
  def DeclareLocks(self, level):
    """Declare the instance's primary/secondary nodes at node level.

    Nothing needs doing when we already requested the whole node set.

    """
    if level != locking.LEVEL_NODE:
      return
    if self.needed_locks[locking.LEVEL_NODE] is locking.ALL_SET:
      return
    self._LockInstancesNodes()
3906 a8083063 Iustin Pop
3907 b6e82a65 Iustin Pop
  def _RunAllocator(self):
    """Compute a new secondary node using an IAllocator.

    On success the chosen node name is stored in self.op.remote_node.

    Raises:
      errors.OpPrereqError: if the allocator run fails or it returns a
        number of nodes different from what it was asked for.

    """
    ial = IAllocator(self,
                     mode=constants.IALLOCATOR_MODE_RELOC,
                     name=self.op.instance_name,
                     relocate_from=[self.sec_node])

    ial.Run(self.op.iallocator)

    if not ial.success:
      raise errors.OpPrereqError("Can't compute nodes using"
                                 " iallocator '%s': %s" % (self.op.iallocator,
                                                           ial.info))
    if len(ial.nodes) != ial.required_nodes:
      # the format string takes three arguments; the previous code passed
      # only two, so raising this error crashed with a TypeError instead
      raise errors.OpPrereqError("iallocator '%s' returned invalid number"
                                 " of nodes (%s), required %s" %
                                 (self.op.iallocator, len(ial.nodes),
                                  ial.required_nodes))
    self.op.remote_node = ial.nodes[0]
    self.LogInfo("Selected new secondary for the instance: %s",
                 self.op.remote_node)
3929 b6e82a65 Iustin Pop
3930 a8083063 Iustin Pop
  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on the master, the primary and all the secondaries.

    """
    env = {
      "MODE": self.op.mode,
      "NEW_SECONDARY": self.op.remote_node,
      "OLD_SECONDARY": self.instance.secondary_nodes[0],
      }
    # the generic instance environment may override the keys above
    env.update(_BuildInstanceHookEnvByObject(self, self.instance))
    node_list = [self.cfg.GetMasterNode(), self.instance.primary_node]
    if self.op.remote_node is not None:
      node_list.append(self.op.remote_node)
    return env, node_list, node_list
3949 a8083063 Iustin Pop
3950 a8083063 Iustin Pop
  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster.

    Verifies the instance uses a network-mirrored disk template with
    exactly one secondary, optionally runs the iallocator to pick the
    new secondary, validates the requested node against the current
    primary/secondary, and finally computes self.tgt_node/self.oth_node
    (and self.new_node for secondary replacement) for the Exec phase.

    """
    instance = self.cfg.GetInstanceInfo(self.op.instance_name)
    assert instance is not None, \
      "Cannot retrieve locked instance %s" % self.op.instance_name
    self.instance = instance

    # disk replacement only makes sense for mirrored templates
    if instance.disk_template not in constants.DTS_NET_MIRROR:
      raise errors.OpPrereqError("Instance's disk layout is not"
                                 " network mirrored.")

    if len(instance.secondary_nodes) != 1:
      raise errors.OpPrereqError("The instance has a strange layout,"
                                 " expected one secondary but found %d" %
                                 len(instance.secondary_nodes))

    self.sec_node = instance.secondary_nodes[0]

    # when an iallocator was given, it fills in self.op.remote_node
    ia_name = getattr(self.op, "iallocator", None)
    if ia_name is not None:
      self._RunAllocator()

    remote_node = self.op.remote_node
    if remote_node is not None:
      self.remote_node_info = self.cfg.GetNodeInfo(remote_node)
      assert self.remote_node_info is not None, \
        "Cannot retrieve locked node %s" % remote_node
    else:
      self.remote_node_info = None
    if remote_node == instance.primary_node:
      raise errors.OpPrereqError("The specified node is the primary node of"
                                 " the instance.")
    elif remote_node == self.sec_node:
      if self.op.mode == constants.REPLACE_DISK_SEC:
        # this is for DRBD8, where we can't execute the same mode of
        # replacement as for drbd7 (no different port allocated)
        raise errors.OpPrereqError("Same secondary given, cannot execute"
                                   " replacement")
    if instance.disk_template == constants.DT_DRBD8:
      if (self.op.mode == constants.REPLACE_DISK_ALL and
          remote_node is not None):
        # switch to replace secondary mode
        self.op.mode = constants.REPLACE_DISK_SEC

      # after the possible switch above, REPLACE_DISK_ALL (without a
      # remote node) is still unsupported for drbd8
      if self.op.mode == constants.REPLACE_DISK_ALL:
        raise errors.OpPrereqError("Template 'drbd' only allows primary or"
                                   " secondary disk replacement, not"
                                   " both at once")
      elif self.op.mode == constants.REPLACE_DISK_PRI:
        if remote_node is not None:
          raise errors.OpPrereqError("Template 'drbd' does not allow changing"
                                     " the secondary while doing a primary"
                                     " node disk replacement")
        # tgt_node is where new LVs are created, oth_node is the peer
        self.tgt_node = instance.primary_node
        self.oth_node = instance.secondary_nodes[0]
      elif self.op.mode == constants.REPLACE_DISK_SEC:
        self.new_node = remote_node # this can be None, in which case
                                    # we don't change the secondary
        self.tgt_node = instance.secondary_nodes[0]
        self.oth_node = instance.primary_node
      else:
        raise errors.ProgrammerError("Unhandled disk replace mode")

    # every requested disk name must exist on the instance
    for name in self.op.disks:
      if instance.FindDisk(name) is None:
        raise errors.OpPrereqError("Disk '%s' not found for instance '%s'" %
                                   (name, instance.name))
4021 a8083063 Iustin Pop
4022 a9e0c397 Iustin Pop
  def _ExecD8DiskOnly(self, feedback_fn):
    """Replace a disk on the primary or secondary for dbrd8.

    The algorithm for replace is quite complicated:
      - for each disk to be replaced:
        - create new LVs on the target node with unique names
        - detach old LVs from the drbd device
        - rename old LVs to name_replaced.<time_t>
        - rename new LVs to old LVs
        - attach the new LVs (with the old names now) to the drbd device
      - wait for sync across all devices
      - for each modified disk:
        - remove old LVs (which have the name name_replaces.<time_t>)

    Failures are not very well handled.

    """
    steps_total = 6
    warning, info = (self.proc.LogWarning, self.proc.LogInfo)
    instance = self.instance
    # maps disk iv_name -> (drbd device, old child LVs, new child LVs)
    iv_names = {}
    vgname = self.cfg.GetVGName()
    # start of work
    cfg = self.cfg
    # tgt_node/oth_node were computed in CheckPrereq depending on mode
    tgt_node = self.tgt_node
    oth_node = self.oth_node

    # Step: check device activation
    self.proc.LogStep(1, steps_total, "check device existence")
    info("checking volume groups")
    my_vg = cfg.GetVGName()
    results = self.rpc.call_vg_list([oth_node, tgt_node])
    if not results:
      raise errors.OpExecError("Can't list volume groups on the nodes")
    for node in oth_node, tgt_node:
      res = results.get(node, False)
      if not res or my_vg not in res:
        raise errors.OpExecError("Volume group '%s' not found on %s" %
                                 (my_vg, node))
    for dev in instance.disks:
      # only the disks named in the opcode are replaced
      if not dev.iv_name in self.op.disks:
        continue
      for node in tgt_node, oth_node:
        info("checking %s on %s" % (dev.iv_name, node))
        cfg.SetDiskID(dev, node)
        if not self.rpc.call_blockdev_find(node, dev):
          raise errors.OpExecError("Can't find device %s on node %s" %
                                   (dev.iv_name, node))

    # Step: check other node consistency
    self.proc.LogStep(2, steps_total, "check peer consistency")
    for dev in instance.disks:
      if not dev.iv_name in self.op.disks:
        continue
      info("checking %s consistency on %s" % (dev.iv_name, oth_node))
      if not _CheckDiskConsistency(self, dev, oth_node,
                                   oth_node==instance.primary_node):
        raise errors.OpExecError("Peer node (%s) has degraded storage, unsafe"
                                 " to replace disks on this node (%s)" %
                                 (oth_node, tgt_node))

    # Step: create new storage
    self.proc.LogStep(3, steps_total, "allocate new storage")
    for dev in instance.disks:
      if not dev.iv_name in self.op.disks:
        continue
      size = dev.size
      cfg.SetDiskID(dev, tgt_node)
      lv_names = [".%s_%s" % (dev.iv_name, suf) for suf in ["data", "meta"]]
      names = _GenerateUniqueNames(self, lv_names)
      lv_data = objects.Disk(dev_type=constants.LD_LV, size=size,
                             logical_id=(vgname, names[0]))
      # NOTE(review): meta LV size is hard-coded to 128 here — presumably
      # MiB, matching the drbd8 metadata volume; confirm against the disk
      # template generation code
      lv_meta = objects.Disk(dev_type=constants.LD_LV, size=128,
                             logical_id=(vgname, names[1]))
      new_lvs = [lv_data, lv_meta]
      old_lvs = dev.children
      iv_names[dev.iv_name] = (dev, old_lvs, new_lvs)
      info("creating new local storage on %s for %s" %
           (tgt_node, dev.iv_name))
      # since we *always* want to create this LV, we use the
      # _Create...OnPrimary (which forces the creation), even if we
      # are talking about the secondary node
      for new_lv in new_lvs:
        if not _CreateBlockDevOnPrimary(self, tgt_node, instance, new_lv,
                                        _GetInstanceInfoText(instance)):
          raise errors.OpExecError("Failed to create new LV named '%s' on"
                                   " node '%s'" %
                                   (new_lv.logical_id[1], tgt_node))

    # Step: for each lv, detach+rename*2+attach
    self.proc.LogStep(4, steps_total, "change drbd configuration")
    for dev, old_lvs, new_lvs in iv_names.itervalues():
      info("detaching %s drbd from local storage" % dev.iv_name)
      if not self.rpc.call_blockdev_removechildren(tgt_node, dev, old_lvs):
        raise errors.OpExecError("Can't detach drbd from local storage on node"
                                 " %s for device %s" % (tgt_node, dev.iv_name))
      #dev.children = []
      #cfg.Update(instance)

      # ok, we created the new LVs, so now we know we have the needed
      # storage; as such, we proceed on the target node to rename
      # old_lv to _old, and new_lv to old_lv; note that we rename LVs
      # using the assumption that logical_id == physical_id (which in
      # turn is the unique_id on that node)

      # FIXME(iustin): use a better name for the replaced LVs
      temp_suffix = int(time.time())
      ren_fn = lambda d, suff: (d.physical_id[0],
                                d.physical_id[1] + "_replaced-%s" % suff)
      # build the rename list based on what LVs exist on the node
      rlist = []
      for to_ren in old_lvs:
        find_res = self.rpc.call_blockdev_find(tgt_node, to_ren)
        if find_res is not None: # device exists
          rlist.append((to_ren, ren_fn(to_ren, temp_suffix)))

      info("renaming the old LVs on the target node")
      if not self.rpc.call_blockdev_rename(tgt_node, rlist):
        raise errors.OpExecError("Can't rename old LVs on node %s" % tgt_node)
      # now we rename the new LVs to the old LVs
      info("renaming the new LVs on the target node")
      rlist = [(new, old.physical_id) for old, new in zip(old_lvs, new_lvs)]
      if not self.rpc.call_blockdev_rename(tgt_node, rlist):
        raise errors.OpExecError("Can't rename new LVs on node %s" % tgt_node)

      # update the config objects to match the renames done on the node
      for old, new in zip(old_lvs, new_lvs):
        new.logical_id = old.logical_id
        cfg.SetDiskID(new, tgt_node)

      for disk in old_lvs:
        disk.logical_id = ren_fn(disk, temp_suffix)
        cfg.SetDiskID(disk, tgt_node)

      # now that the new lvs have the old name, we can add them to the device
      info("adding new mirror component on %s" % tgt_node)
      if not self.rpc.call_blockdev_addchildren(tgt_node, dev, new_lvs):
        for new_lv in new_lvs:
          if not self.rpc.call_blockdev_remove(tgt_node, new_lv):
            # NOTE(review): the "%s" placeholder has no matching argument
            # here, so it is emitted literally; looks like the device name
            # was meant to be interpolated — confirm LogWarning's signature
            warning("Can't rollback device %s", hint="manually cleanup unused"
                    " logical volumes")
        raise errors.OpExecError("Can't add local storage to drbd")

      dev.children = new_lvs
      cfg.Update(instance)

    # Step: wait for sync

    # this can fail as the old devices are degraded and _WaitForSync
    # does a combined result over all disks, so we don't check its
    # return value
    self.proc.LogStep(5, steps_total, "sync devices")
    _WaitForSync(self, instance, unlock=True)

    # so check manually all the devices
    for name, (dev, old_lvs, new_lvs) in iv_names.iteritems():
      cfg.SetDiskID(dev, instance.primary_node)
      # index 5 of the blockdev_find result is the degraded flag
      is_degr = self.rpc.call_blockdev_find(instance.primary_node, dev)[5]
      if is_degr:
        raise errors.OpExecError("DRBD device %s is degraded!" % name)

    # Step: remove old storage
    self.proc.LogStep(6, steps_total, "removing old storage")
    for name, (dev, old_lvs, new_lvs) in iv_names.iteritems():
      info("remove logical volumes for %s" % name)
      for lv in old_lvs:
        cfg.SetDiskID(lv, tgt_node)
        # removal failures are best-effort: warn and keep going
        if not self.rpc.call_blockdev_remove(tgt_node, lv):
          warning("Can't remove old LV", hint="manually remove unused LVs")
          continue
4191 a9e0c397 Iustin Pop
4192 a9e0c397 Iustin Pop
  def _ExecD8Secondary(self, feedback_fn):
    """Replace the secondary node for drbd8.

    The algorithm for replace is quite complicated:
      - for all disks of the instance:
        - create new LVs on the new node with same names
        - shutdown the drbd device on the old secondary
        - disconnect the drbd network on the primary
        - create the drbd device on the new secondary
        - network attach the drbd on the primary, using an artifice:
          the drbd code for Attach() will connect to the network if it
          finds a device which is connected to the good local disks but
          not network enabled
      - wait for sync across all devices
      - remove all disks from the old secondary

    Failures are not very well handled.

    """
    steps_total = 6
    warning, info = (self.proc.LogWarning, self.proc.LogInfo)
    instance = self.instance
    # maps iv_name -> (disk object, old LV children, new logical_id)
    iv_names = {}
    vgname = self.cfg.GetVGName()  # NOTE(review): unused below (my_vg is used instead)
    # start of work
    cfg = self.cfg
    old_node = self.tgt_node
    new_node = self.new_node
    pri_node = instance.primary_node

    # Step: check device activation
    self.proc.LogStep(1, steps_total, "check device existence")
    info("checking volume groups")
    my_vg = cfg.GetVGName()
    results = self.rpc.call_vg_list([pri_node, new_node])
    if not results:
      raise errors.OpExecError("Can't list volume groups on the nodes")
    # both the primary and the future secondary must carry our VG
    for node in pri_node, new_node:
      res = results.get(node, False)
      if not res or my_vg not in res:
        raise errors.OpExecError("Volume group '%s' not found on %s" %
                                 (my_vg, node))
    for dev in instance.disks:
      # only the disks named in the opcode are checked and replaced
      if not dev.iv_name in self.op.disks:
        continue
      info("checking %s on %s" % (dev.iv_name, pri_node))
      cfg.SetDiskID(dev, pri_node)
      if not self.rpc.call_blockdev_find(pri_node, dev):
        raise errors.OpExecError("Can't find device %s on node %s" %
                                 (dev.iv_name, pri_node))

    # Step: check other node consistency
    self.proc.LogStep(2, steps_total, "check peer consistency")
    for dev in instance.disks:
      if not dev.iv_name in self.op.disks:
        continue
      info("checking %s consistency on %s" % (dev.iv_name, pri_node))
      # ldisk=True: the primary's local disk itself must be healthy, not
      # merely the combined DRBD sync state
      if not _CheckDiskConsistency(self, dev, pri_node, True, ldisk=True):
        raise errors.OpExecError("Primary node (%s) has degraded storage,"
                                 " unsafe to replace the secondary" %
                                 pri_node)

    # Step: create new storage
    self.proc.LogStep(3, steps_total, "allocate new storage")
    for dev in instance.disks:
      size = dev.size  # NOTE(review): unused in this loop
      info("adding new local storage on %s for %s" % (new_node, dev.iv_name))
      # since we *always* want to create this LV, we use the
      # _Create...OnPrimary (which forces the creation), even if we
      # are talking about the secondary node
      for new_lv in dev.children:
        if not _CreateBlockDevOnPrimary(self, new_node, instance, new_lv,
                                        _GetInstanceInfoText(instance)):
          raise errors.OpExecError("Failed to create new LV named '%s' on"
                                   " node '%s'" %
                                   (new_lv.logical_id[1], new_node))

    # Step 4: dbrd minors and drbd setups changes
    # after this, we must manually remove the drbd minors on both the
    # error and the success paths
    minors = cfg.AllocateDRBDMinor([new_node for dev in instance.disks],
                                   instance.name)
    logging.debug("Allocated minors %s" % (minors,))
    self.proc.LogStep(4, steps_total, "changing drbd configuration")
    for dev, new_minor in zip(instance.disks, minors):
      size = dev.size  # NOTE(review): unused in this loop
      info("activating a new drbd on %s for %s" % (new_node, dev.iv_name))
      # create new devices on new_node
      # the DRBD logical_id keeps the two per-node minors at indices 3/4,
      # ordered like the node entries at indices 0/1; substitute the old
      # secondary's slot with the freshly allocated minor
      if pri_node == dev.logical_id[0]:
        new_logical_id = (pri_node, new_node,
                          dev.logical_id[2], dev.logical_id[3], new_minor,
                          dev.logical_id[5])
      else:
        new_logical_id = (new_node, pri_node,
                          dev.logical_id[2], new_minor, dev.logical_id[4],
                          dev.logical_id[5])
      iv_names[dev.iv_name] = (dev, dev.children, new_logical_id)
      logging.debug("Allocated new_minor: %s, new_logical_id: %s", new_minor,
                    new_logical_id)
      new_drbd = objects.Disk(dev_type=constants.LD_DRBD8,
                              logical_id=new_logical_id,
                              children=dev.children)
      if not _CreateBlockDevOnSecondary(self, new_node, instance,
                                        new_drbd, False,
                                        _GetInstanceInfoText(instance)):
        # creation failed: release the reserved minors before aborting
        self.cfg.ReleaseDRBDMinors(instance.name)
        raise errors.OpExecError("Failed to create new DRBD on"
                                 " node '%s'" % new_node)

    for dev in instance.disks:
      # we have new devices, shutdown the drbd on the old secondary
      info("shutting down drbd for %s on old node" % dev.iv_name)
      cfg.SetDiskID(dev, old_node)
      # failure here is non-fatal: the old node is being retired anyway
      if not self.rpc.call_blockdev_shutdown(old_node, dev):
        warning("Failed to shutdown drbd for %s on old node" % dev.iv_name,
                hint="Please cleanup this device manually as soon as possible")

    info("detaching primary drbds from the network (=> standalone)")
    done = 0
    for dev in instance.disks:
      cfg.SetDiskID(dev, pri_node)
      # set the network part of the physical (unique in bdev terms) id
      # to None, meaning detach from network
      dev.physical_id = (None, None, None, None) + dev.physical_id[4:]
      # and 'find' the device, which will 'fix' it to match the
      # standalone state
      if self.rpc.call_blockdev_find(pri_node, dev):
        done += 1
      else:
        warning("Failed to detach drbd %s from network, unusual case" %
                dev.iv_name)

    if not done:
      # no detaches succeeded (very unlikely)
      self.cfg.ReleaseDRBDMinors(instance.name)
      raise errors.OpExecError("Can't detach at least one DRBD from old node")

    # if we managed to detach at least one, we update all the disks of
    # the instance to point to the new secondary
    info("updating instance configuration")
    for dev, _, new_logical_id in iv_names.itervalues():
      dev.logical_id = new_logical_id
      cfg.SetDiskID(dev, pri_node)
    cfg.Update(instance)
    # we can remove now the temp minors as now the new values are
    # written to the config file (and therefore stable)
    self.cfg.ReleaseDRBDMinors(instance.name)

    # and now perform the drbd attach
    info("attaching primary drbds to new secondary (standalone => connected)")
    failures = []  # NOTE(review): never appended to or read — dead variable
    for dev in instance.disks:
      info("attaching primary drbd for %s to new secondary node" % dev.iv_name)
      # since the attach is smart, it's enough to 'find' the device,
      # it will automatically activate the network, if the physical_id
      # is correct
      cfg.SetDiskID(dev, pri_node)
      logging.debug("Disk to attach: %s", dev)
      if not self.rpc.call_blockdev_find(pri_node, dev):
        warning("can't attach drbd %s to new secondary!" % dev.iv_name,
                "please do a gnt-instance info to see the status of disks")

    # this can fail as the old devices are degraded and _WaitForSync
    # does a combined result over all disks, so we don't check its
    # return value
    self.proc.LogStep(5, steps_total, "sync devices")
    _WaitForSync(self, instance, unlock=True)

    # so check manually all the devices
    for name, (dev, old_lvs, _) in iv_names.iteritems():
      cfg.SetDiskID(dev, pri_node)
      # index 5 of the blockdev_find result is the 'degraded' flag
      is_degr = self.rpc.call_blockdev_find(pri_node, dev)[5]
      if is_degr:
        raise errors.OpExecError("DRBD device %s is degraded!" % name)

    self.proc.LogStep(6, steps_total, "removing old storage")
    for name, (dev, old_lvs, _) in iv_names.iteritems():
      info("remove logical volumes for %s" % name)
      for lv in old_lvs:
        cfg.SetDiskID(lv, old_node)
        # best-effort removal: leftover LVs are only a cleanup concern
        if not self.rpc.call_blockdev_remove(old_node, lv):
          warning("Can't remove LV on old secondary",
                  hint="Cleanup stale volumes by hand")
4376 a9e0c397 Iustin Pop
4377 a9e0c397 Iustin Pop
  def Exec(self, feedback_fn):
    """Execute disk replacement.

    This dispatches the disk replacement to the appropriate handler.

    """
    instance = self.instance

    # Disks of a down instance are not active; bring them up before
    # replacing them (and shut them down again afterwards).
    if instance.status == "down":
      _StartInstanceDisks(self, instance, True)

    if instance.disk_template != constants.DT_DRBD8:
      raise errors.ProgrammerError("Unhandled disk replacement case")

    # No remote node given: replace the disks in place; otherwise move
    # the secondary to the requested node.
    if self.op.remote_node is None:
      handler = self._ExecD8DiskOnly
    else:
      handler = self._ExecD8Secondary

    result = handler(feedback_fn)

    # Deactivate the instance disks if we're replacing them on a down instance
    if instance.status == "down":
      _SafeShutdownInstanceDisks(self, instance)

    return result
4404 a9e0c397 Iustin Pop
4405 a8083063 Iustin Pop
4406 8729e0d7 Iustin Pop
class LUGrowDisk(LogicalUnit):
  """Grow a disk of an instance.

  """
  HPATH = "disk-grow"
  HTYPE = constants.HTYPE_INSTANCE
  # required opcode parameters
  _OP_REQP = ["instance_name", "disk", "amount", "wait_for_sync"]
  REQ_BGL = False

  def ExpandNames(self):
    # Lock the instance now; the node locks are computed in DeclareLocks
    # from the instance's node list.
    self._ExpandAndLockInstance()
    self.needed_locks[locking.LEVEL_NODE] = []
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE

  def DeclareLocks(self, level):
    if level == locking.LEVEL_NODE:
      self._LockInstancesNodes()

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on the master, the primary and all the secondaries.

    """
    env = {
      "DISK": self.op.disk,
      "AMOUNT": self.op.amount,
      }
    env.update(_BuildInstanceHookEnvByObject(self, self.instance))
    nl = [
      self.cfg.GetMasterNode(),
      self.instance.primary_node,
      ]
    return env, nl, nl

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster.

    """
    instance = self.cfg.GetInstanceInfo(self.op.instance_name)
    assert instance is not None, \
      "Cannot retrieve locked instance %s" % self.op.instance_name

    self.instance = instance

    # only LVM-backed disk templates can be grown
    if instance.disk_template not in (constants.DT_PLAIN, constants.DT_DRBD8):
      raise errors.OpPrereqError("Instance's disk layout does not support"
                                 " growing.")

    if instance.FindDisk(self.op.disk) is None:
      raise errors.OpPrereqError("Disk '%s' not found for instance '%s'" %
                                 (self.op.disk, instance.name))

    # every node holding a copy of the disk must have enough free VG space
    nodenames = [instance.primary_node] + list(instance.secondary_nodes)
    nodeinfo = self.rpc.call_node_info(nodenames, self.cfg.GetVGName(),
                                       instance.hypervisor)
    for node in nodenames:
      info = nodeinfo.get(node, None)
      if not info:
        raise errors.OpPrereqError("Cannot get current information"
                                   " from node '%s'" % node)
      vg_free = info.get('vg_free', None)
      # NOTE(review): this rejects any non-int value; on Python 2 a 'long'
      # would also be rejected — confirm the RPC always returns a plain int
      if not isinstance(vg_free, int):
        raise errors.OpPrereqError("Can't compute free disk space on"
                                   " node %s" % node)
      if self.op.amount > info['vg_free']:
        raise errors.OpPrereqError("Not enough disk space on target node %s:"
                                   " %d MiB available, %d MiB required" %
                                   (node, info['vg_free'], self.op.amount))

  def Exec(self, feedback_fn):
    """Execute disk grow.

    """
    instance = self.instance
    disk = instance.FindDisk(self.op.disk)
    # grow on all nodes holding the disk: secondaries first, primary last
    for node in (instance.secondary_nodes + (instance.primary_node,)):
      self.cfg.SetDiskID(disk, node)
      result = self.rpc.call_blockdev_grow(node, disk, self.op.amount)
      # expected RPC result shape: a 2-element (success, message) sequence
      if (not result or not isinstance(result, (list, tuple)) or
          len(result) != 2):
        raise errors.OpExecError("grow request failed to node %s" % node)
      elif not result[0]:
        raise errors.OpExecError("grow request failed to node %s: %s" %
                                 (node, result[1]))
    # record the new size in the configuration only after all nodes grew
    disk.RecordGrow(self.op.amount)
    self.cfg.Update(instance)
    if self.op.wait_for_sync:
      disk_abort = not _WaitForSync(self, instance)
      if disk_abort:
        self.proc.LogWarning("Warning: disk sync-ing has not returned a good"
                             " status.\nPlease check the instance.")
4500 8729e0d7 Iustin Pop
4501 8729e0d7 Iustin Pop
4502 a8083063 Iustin Pop
class LUQueryInstanceData(NoHooksLU):
  """Query runtime instance data.

  Returns, per instance, a dict of configuration data and (unless the
  'static' flag is set) live runtime state, including a recursive disk
  status tree.

  """
  _OP_REQP = ["instances", "static"]
  REQ_BGL = False

  def ExpandNames(self):
    self.needed_locks = {}
    # all locks are taken in shared mode: this is a read-only query
    self.share_locks = dict(((i, 1) for i in locking.LEVELS))

    if not isinstance(self.op.instances, list):
      raise errors.OpPrereqError("Invalid argument type 'instances'")

    if self.op.instances:
      self.wanted_names = []
      for name in self.op.instances:
        full_name = self.cfg.ExpandInstanceName(name)
        if full_name is None:
          # bugfix: this opcode has no 'instance_name' field, so the
          # previous "self.op.instance_name" here raised AttributeError
          # instead of reporting the unknown instance
          raise errors.OpPrereqError("Instance '%s' not known" % name)
        self.wanted_names.append(full_name)
      self.needed_locks[locking.LEVEL_INSTANCE] = self.wanted_names
    else:
      # no names given: query all instances
      self.wanted_names = None
      self.needed_locks[locking.LEVEL_INSTANCE] = locking.ALL_SET

    self.needed_locks[locking.LEVEL_NODE] = []
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE

  def DeclareLocks(self, level):
    if level == locking.LEVEL_NODE:
      self._LockInstancesNodes()

  def CheckPrereq(self):
    """Check prerequisites.

    This only checks the optional instance list against the existing names.

    """
    if self.wanted_names is None:
      # "all instances" case: the acquired locks give us the name list
      self.wanted_names = self.acquired_locks[locking.LEVEL_INSTANCE]

    self.wanted_instances = [self.cfg.GetInstanceInfo(name) for name
                             in self.wanted_names]

  def _ComputeDiskStatus(self, instance, snode, dev):
    """Compute block device status.

    Recursively builds a dict describing 'dev' and its children; the
    'pstatus'/'sstatus' entries hold the blockdev_find result from the
    primary/secondary node, or None when running in static mode.

    """
    static = self.op.static
    if not static:
      self.cfg.SetDiskID(dev, instance.primary_node)
      dev_pstatus = self.rpc.call_blockdev_find(instance.primary_node, dev)
    else:
      dev_pstatus = None

    if dev.dev_type in constants.LDS_DRBD:
      # we change the snode then (otherwise we use the one passed in):
      # for DRBD the secondary is whichever logical_id node is not primary
      if dev.logical_id[0] == instance.primary_node:
        snode = dev.logical_id[1]
      else:
        snode = dev.logical_id[0]

    if snode and not static:
      self.cfg.SetDiskID(dev, snode)
      dev_sstatus = self.rpc.call_blockdev_find(snode, dev)
    else:
      dev_sstatus = None

    if dev.children:
      dev_children = [self._ComputeDiskStatus(instance, snode, child)
                      for child in dev.children]
    else:
      dev_children = []

    data = {
      "iv_name": dev.iv_name,
      "dev_type": dev.dev_type,
      "logical_id": dev.logical_id,
      "physical_id": dev.physical_id,
      "pstatus": dev_pstatus,
      "sstatus": dev_sstatus,
      "children": dev_children,
      }

    return data

  def Exec(self, feedback_fn):
    """Gather and return data"""
    result = {}

    cluster = self.cfg.GetClusterInfo()

    for instance in self.wanted_instances:
      if not self.op.static:
        # live query: ask the primary node whether the instance is running
        remote_info = self.rpc.call_instance_info(instance.primary_node,
                                                  instance.name,
                                                  instance.hypervisor)
        if remote_info and "state" in remote_info:
          remote_state = "up"
        else:
          remote_state = "down"
      else:
        remote_state = None
      if instance.status == "down":
        config_state = "down"
      else:
        config_state = "up"

      disks = [self._ComputeDiskStatus(instance, None, device)
               for device in instance.disks]

      idict = {
        "name": instance.name,
        "config_state": config_state,
        "run_state": remote_state,
        "pnode": instance.primary_node,
        "snodes": instance.secondary_nodes,
        "os": instance.os,
        "nics": [(nic.mac, nic.ip, nic.bridge) for nic in instance.nics],
        "disks": disks,
        "hypervisor": instance.hypervisor,
        "network_port": instance.network_port,
        "hv_instance": instance.hvparams,
        # "actual" values are the instance params filled with cluster defaults
        "hv_actual": cluster.FillHV(instance),
        "be_instance": instance.beparams,
        "be_actual": cluster.FillBE(instance),
        }

      result[instance.name] = idict

    return result
4636 a8083063 Iustin Pop
4637 a8083063 Iustin Pop
4638 7767bbf5 Manuel Franceschini
class LUSetInstanceParams(LogicalUnit):
4639 a8083063 Iustin Pop
  """Modifies an instances's parameters.
4640 a8083063 Iustin Pop

4641 a8083063 Iustin Pop
  """
4642 a8083063 Iustin Pop
  HPATH = "instance-modify"
4643 a8083063 Iustin Pop
  HTYPE = constants.HTYPE_INSTANCE
4644 74409b12 Iustin Pop
  _OP_REQP = ["instance_name", "hvparams"]
4645 1a5c7281 Guido Trotter
  REQ_BGL = False
4646 1a5c7281 Guido Trotter
4647 1a5c7281 Guido Trotter
  def ExpandNames(self):
4648 1a5c7281 Guido Trotter
    self._ExpandAndLockInstance()
4649 74409b12 Iustin Pop
    self.needed_locks[locking.LEVEL_NODE] = []
4650 74409b12 Iustin Pop
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
4651 74409b12 Iustin Pop
4652 74409b12 Iustin Pop
4653 74409b12 Iustin Pop
  def DeclareLocks(self, level):
4654 74409b12 Iustin Pop
    if level == locking.LEVEL_NODE:
4655 74409b12 Iustin Pop
      self._LockInstancesNodes()
4656 a8083063 Iustin Pop
4657 a8083063 Iustin Pop
  def BuildHooksEnv(self):
4658 a8083063 Iustin Pop
    """Build hooks env.
4659 a8083063 Iustin Pop

4660 a8083063 Iustin Pop
    This runs on the master, primary and secondaries.
4661 a8083063 Iustin Pop

4662 a8083063 Iustin Pop
    """
4663 396e1b78 Michael Hanselmann
    args = dict()
4664 338e51e8 Iustin Pop
    if constants.BE_MEMORY in self.be_new:
4665 338e51e8 Iustin Pop
      args['memory'] = self.be_new[constants.BE_MEMORY]
4666 338e51e8 Iustin Pop
    if constants.BE_VCPUS in self.be_new:
4667 61be6ba4 Iustin Pop
      args['vcpus'] = self.be_new[constants.BE_VCPUS]
4668 ef756965 Iustin Pop
    if self.do_ip or self.do_bridge or self.mac:
4669 396e1b78 Michael Hanselmann
      if self.do_ip:
4670 396e1b78 Michael Hanselmann
        ip = self.ip
4671 396e1b78 Michael Hanselmann
      else:
4672 396e1b78 Michael Hanselmann
        ip = self.instance.nics[0].ip
4673 396e1b78 Michael Hanselmann
      if self.bridge:
4674 396e1b78 Michael Hanselmann
        bridge = self.bridge
4675 396e1b78 Michael Hanselmann
      else:
4676 396e1b78 Michael Hanselmann
        bridge = self.instance.nics[0].bridge
4677 ef756965 Iustin Pop
      if self.mac:
4678 ef756965 Iustin Pop
        mac = self.mac
4679 ef756965 Iustin Pop
      else:
4680 ef756965 Iustin Pop
        mac = self.instance.nics[0].mac
4681 ef756965 Iustin Pop
      args['nics'] = [(ip, bridge, mac)]
4682 338e51e8 Iustin Pop
    env = _BuildInstanceHookEnvByObject(self, self.instance, override=args)
4683 d6a02168 Michael Hanselmann
    nl = [self.cfg.GetMasterNode(),
4684 a8083063 Iustin Pop
          self.instance.primary_node] + list(self.instance.secondary_nodes)
4685 a8083063 Iustin Pop
    return env, nl, nl
4686 a8083063 Iustin Pop
4687 a8083063 Iustin Pop
  def CheckPrereq(self):
4688 a8083063 Iustin Pop
    """Check prerequisites.
4689 a8083063 Iustin Pop

4690 a8083063 Iustin Pop
    This only checks the instance list against the existing names.
4691 a8083063 Iustin Pop

4692 a8083063 Iustin Pop
    """
4693 1a5c7281 Guido Trotter
    # FIXME: all the parameters could be checked before, in ExpandNames, or in
4694 1a5c7281 Guido Trotter
    # a separate CheckArguments function, if we implement one, so the operation
4695 1a5c7281 Guido Trotter
    # can be aborted without waiting for any lock, should it have an error...
4696 a8083063 Iustin Pop
    self.ip = getattr(self.op, "ip", None)
4697 1862d460 Alexander Schreiber
    self.mac = getattr(self.op, "mac", None)
4698 a8083063 Iustin Pop
    self.bridge = getattr(self.op, "bridge", None)
4699 973d7867 Iustin Pop
    self.kernel_path = getattr(self.op, "kernel_path", None)
4700 973d7867 Iustin Pop
    self.initrd_path = getattr(self.op, "initrd_path", None)
4701 4300c4b6 Guido Trotter
    self.force = getattr(self.op, "force", None)
4702 338e51e8 Iustin Pop
    all_parms = [self.ip, self.bridge, self.mac]
4703 338e51e8 Iustin Pop
    if (all_parms.count(None) == len(all_parms) and
4704 338e51e8 Iustin Pop
        not self.op.hvparams and
4705 338e51e8 Iustin Pop
        not self.op.beparams):
4706 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("No changes submitted")
4707 338e51e8 Iustin Pop
    for item in (constants.BE_MEMORY, constants.BE_VCPUS):
4708 338e51e8 Iustin Pop
      val = self.op.beparams.get(item, None)
4709 338e51e8 Iustin Pop
      if val is not None:
4710 338e51e8 Iustin Pop
        try:
4711 338e51e8 Iustin Pop
          val = int(val)
4712 338e51e8 Iustin Pop
        except ValueError, err:
4713 338e51e8 Iustin Pop
          raise errors.OpPrereqError("Invalid %s size: %s" % (item, str(err)))
4714 338e51e8 Iustin Pop
        self.op.beparams[item] = val
4715 a8083063 Iustin Pop
    if self.ip is not None:
4716 a8083063 Iustin Pop
      self.do_ip = True
4717 a8083063 Iustin Pop
      if self.ip.lower() == "none":
4718 a8083063 Iustin Pop
        self.ip = None
4719 a8083063 Iustin Pop
      else:
4720 a8083063 Iustin Pop
        if not utils.IsValidIP(self.ip):
4721 3ecf6786 Iustin Pop
          raise errors.OpPrereqError("Invalid IP address '%s'." % self.ip)
4722 a8083063 Iustin Pop
    else:
4723 a8083063 Iustin Pop
      self.do_ip = False
4724 ecb215b5 Michael Hanselmann
    self.do_bridge = (self.bridge is not None)
4725 1862d460 Alexander Schreiber
    if self.mac is not None:
4726 1862d460 Alexander Schreiber
      if self.cfg.IsMacInUse(self.mac):
4727 1862d460 Alexander Schreiber
        raise errors.OpPrereqError('MAC address %s already in use in cluster' %
4728 1862d460 Alexander Schreiber
                                   self.mac)
4729 1862d460 Alexander Schreiber
      if not utils.IsValidMac(self.mac):
4730 1862d460 Alexander Schreiber
        raise errors.OpPrereqError('Invalid MAC address %s' % self.mac)
4731 a8083063 Iustin Pop
4732 74409b12 Iustin Pop
    # checking the new params on the primary/secondary nodes
4733 31a853d2 Iustin Pop
4734 cfefe007 Guido Trotter
    instance = self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
4735 1a5c7281 Guido Trotter
    assert self.instance is not None, \
4736 1a5c7281 Guido Trotter
      "Cannot retrieve locked instance %s" % self.op.instance_name
4737 74409b12 Iustin Pop
    pnode = self.instance.primary_node
4738 74409b12 Iustin Pop
    nodelist = [pnode]
4739 74409b12 Iustin Pop
    nodelist.extend(instance.secondary_nodes)
4740 74409b12 Iustin Pop
4741 338e51e8 Iustin Pop
    # hvparams processing
4742 74409b12 Iustin Pop
    if self.op.hvparams:
4743 74409b12 Iustin Pop
      i_hvdict = copy.deepcopy(instance.hvparams)
4744 74409b12 Iustin Pop
      for key, val in self.op.hvparams.iteritems():
4745 74409b12 Iustin Pop
        if val is None:
4746 74409b12 Iustin Pop
          try:
4747 74409b12 Iustin Pop
            del i_hvdict[key]
4748 74409b12 Iustin Pop
          except KeyError:
4749 74409b12 Iustin Pop
            pass
4750 74409b12 Iustin Pop
        else:
4751 74409b12 Iustin Pop
          i_hvdict[key] = val
4752 74409b12 Iustin Pop
      cluster = self.cfg.GetClusterInfo()
4753 74409b12 Iustin Pop
      hv_new = cluster.FillDict(cluster.hvparams[instance.hypervisor],
4754 74409b12 Iustin Pop
                                i_hvdict)
4755 74409b12 Iustin Pop
      # local check
4756 74409b12 Iustin Pop
      hypervisor.GetHypervisor(
4757 74409b12 Iustin Pop
        instance.hypervisor).CheckParameterSyntax(hv_new)
4758 74409b12 Iustin Pop
      _CheckHVParams(self, nodelist, instance.hypervisor, hv_new)
4759 338e51e8 Iustin Pop
      self.hv_new = hv_new # the new actual values
4760 338e51e8 Iustin Pop
      self.hv_inst = i_hvdict # the new dict (without defaults)
4761 338e51e8 Iustin Pop
    else:
4762 338e51e8 Iustin Pop
      self.hv_new = self.hv_inst = {}
4763 338e51e8 Iustin Pop
4764 338e51e8 Iustin Pop
    # beparams processing
4765 338e51e8 Iustin Pop
    if self.op.beparams:
4766 338e51e8 Iustin Pop
      i_bedict = copy.deepcopy(instance.beparams)
4767 338e51e8 Iustin Pop
      for key, val in self.op.beparams.iteritems():
4768 338e51e8 Iustin Pop
        if val is None:
4769 338e51e8 Iustin Pop
          try:
4770 338e51e8 Iustin Pop
            del i_bedict[key]
4771 338e51e8 Iustin Pop
          except KeyError:
4772 338e51e8 Iustin Pop
            pass
4773 338e51e8 Iustin Pop
        else:
4774 338e51e8 Iustin Pop
          i_bedict[key] = val
4775 338e51e8 Iustin Pop
      cluster = self.cfg.GetClusterInfo()
4776 338e51e8 Iustin Pop
      be_new = cluster.FillDict(cluster.beparams[constants.BEGR_DEFAULT],
4777 338e51e8 Iustin Pop
                                i_bedict)
4778 338e51e8 Iustin Pop
      self.be_new = be_new # the new actual values
4779 338e51e8 Iustin Pop
      self.be_inst = i_bedict # the new dict (without defaults)
4780 338e51e8 Iustin Pop
    else:
4781 338e51e8 Iustin Pop
      self.hv_new = self.hv_inst = {}
4782 74409b12 Iustin Pop
4783 cfefe007 Guido Trotter
    self.warn = []
4784 647a5d80 Iustin Pop
4785 338e51e8 Iustin Pop
    if constants.BE_MEMORY in self.op.beparams and not self.force:
4786 647a5d80 Iustin Pop
      mem_check_list = [pnode]
4787 c0f2b229 Iustin Pop
      if be_new[constants.BE_AUTO_BALANCE]:
4788 c0f2b229 Iustin Pop
        # either we changed auto_balance to yes or it was from before
4789 647a5d80 Iustin Pop
        mem_check_list.extend(instance.secondary_nodes)
4790 72737a7f Iustin Pop
      instance_info = self.rpc.call_instance_info(pnode, instance.name,
4791 72737a7f Iustin Pop
                                                  instance.hypervisor)
4792 647a5d80 Iustin Pop
      nodeinfo = self.rpc.call_node_info(mem_check_list, self.cfg.GetVGName(),
4793 72737a7f Iustin Pop
                                         instance.hypervisor)
4794 cfefe007 Guido Trotter
4795 cfefe007 Guido Trotter
      if pnode not in nodeinfo or not isinstance(nodeinfo[pnode], dict):
4796 cfefe007 Guido Trotter
        # Assume the primary node is unreachable and go ahead
4797 cfefe007 Guido Trotter
        self.warn.append("Can't get info from primary node %s" % pnode)
4798 cfefe007 Guido Trotter
      else:
4799 cfefe007 Guido Trotter
        if instance_info:
4800 cfefe007 Guido Trotter
          current_mem = instance_info['memory']
4801 cfefe007 Guido Trotter
        else:
4802 cfefe007 Guido Trotter
          # Assume instance not running
4803 cfefe007 Guido Trotter
          # (there is a slight race condition here, but it's not very probable,
4804 cfefe007 Guido Trotter
          # and we have no other way to check)
4805 cfefe007 Guido Trotter
          current_mem = 0
4806 338e51e8 Iustin Pop
        miss_mem = (be_new[constants.BE_MEMORY] - current_mem -
4807 338e51e8 Iustin Pop
                    nodeinfo[pnode]['memory_free'])
4808 cfefe007 Guido Trotter
        if miss_mem > 0:
4809 cfefe007 Guido Trotter
          raise errors.OpPrereqError("This change will prevent the instance"
4810 cfefe007 Guido Trotter
                                     " from starting, due to %d MB of memory"
4811 cfefe007 Guido Trotter
                                     " missing on its primary node" % miss_mem)
4812 cfefe007 Guido Trotter
4813 c0f2b229 Iustin Pop
      if be_new[constants.BE_AUTO_BALANCE]:
4814 647a5d80 Iustin Pop
        for node in instance.secondary_nodes:
4815 647a5d80 Iustin Pop
          if node not in nodeinfo or not isinstance(nodeinfo[node], dict):
4816 647a5d80 Iustin Pop
            self.warn.append("Can't get info from secondary node %s" % node)
4817 647a5d80 Iustin Pop
          elif be_new[constants.BE_MEMORY] > nodeinfo[node]['memory_free']:
4818 647a5d80 Iustin Pop
            self.warn.append("Not enough memory to failover instance to"
4819 647a5d80 Iustin Pop
                             " secondary node %s" % node)
4820 5bc84f33 Alexander Schreiber
4821 a8083063 Iustin Pop
    return
4822 a8083063 Iustin Pop
4823 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
4824 a8083063 Iustin Pop
    """Modifies an instance.
4825 a8083063 Iustin Pop

4826 a8083063 Iustin Pop
    All parameters take effect only at the next restart of the instance.
4827 a8083063 Iustin Pop
    """
4828 cfefe007 Guido Trotter
    # Process here the warnings from CheckPrereq, as we don't have a
4829 cfefe007 Guido Trotter
    # feedback_fn there.
4830 cfefe007 Guido Trotter
    for warn in self.warn:
4831 cfefe007 Guido Trotter
      feedback_fn("WARNING: %s" % warn)
4832 cfefe007 Guido Trotter
4833 a8083063 Iustin Pop
    result = []
4834 a8083063 Iustin Pop
    instance = self.instance
4835 a8083063 Iustin Pop
    if self.do_ip:
4836 a8083063 Iustin Pop
      instance.nics[0].ip = self.ip
4837 a8083063 Iustin Pop
      result.append(("ip", self.ip))
4838 a8083063 Iustin Pop
    if self.bridge:
4839 a8083063 Iustin Pop
      instance.nics[0].bridge = self.bridge
4840 a8083063 Iustin Pop
      result.append(("bridge", self.bridge))
4841 1862d460 Alexander Schreiber
    if self.mac:
4842 1862d460 Alexander Schreiber
      instance.nics[0].mac = self.mac
4843 1862d460 Alexander Schreiber
      result.append(("mac", self.mac))
4844 74409b12 Iustin Pop
    if self.op.hvparams:
4845 74409b12 Iustin Pop
      instance.hvparams = self.hv_new
4846 74409b12 Iustin Pop
      for key, val in self.op.hvparams.iteritems():
4847 74409b12 Iustin Pop
        result.append(("hv/%s" % key, val))
4848 338e51e8 Iustin Pop
    if self.op.beparams:
4849 338e51e8 Iustin Pop
      instance.beparams = self.be_inst
4850 338e51e8 Iustin Pop
      for key, val in self.op.beparams.iteritems():
4851 338e51e8 Iustin Pop
        result.append(("be/%s" % key, val))
4852 a8083063 Iustin Pop
4853 ea94e1cd Guido Trotter
    self.cfg.Update(instance)
4854 a8083063 Iustin Pop
4855 a8083063 Iustin Pop
    return result
4856 a8083063 Iustin Pop
4857 a8083063 Iustin Pop
4858 a8083063 Iustin Pop
class LUQueryExports(NoHooksLU):
  """Query the exports list

  """
  _OP_REQP = ['nodes']
  REQ_BGL = False

  def ExpandNames(self):
    # only shared node locks are needed; an empty node list means
    # "query every node in the cluster"
    self.needed_locks = {}
    self.share_locks[locking.LEVEL_NODE] = 1
    if self.op.nodes:
      self.needed_locks[locking.LEVEL_NODE] = \
        _GetWantedNodes(self, self.op.nodes)
    else:
      self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET

  def CheckPrereq(self):
    """Check prerequisites.

    """
    # the nodes to query are exactly those we managed to lock
    self.nodes = self.acquired_locks[locking.LEVEL_NODE]

  def Exec(self, feedback_fn):
    """Compute the list of all the exported system images.

    Returns:
      a dictionary with the structure node->(export-list)
      where export-list is a list of the instances exported on
      that node.

    """
    return self.rpc.call_export_list(self.nodes)
class LUExportInstance(LogicalUnit):
  """Export an instance to an image in the cluster.

  """
  HPATH = "instance-export"
  HTYPE = constants.HTYPE_INSTANCE
  _OP_REQP = ["instance_name", "target_node", "shutdown"]
  REQ_BGL = False

  def ExpandNames(self):
    self._ExpandAndLockInstance()
    # FIXME: lock only instance primary and destination node
    #
    # Sad but true, for now we have do lock all nodes, as we don't know where
    # the previous export might be, and and in this LU we search for it and
    # remove it from its current node. In the future we could fix this by:
    #  - making a tasklet to search (share-lock all), then create the new one,
    #    then one to remove, after
    #  - removing the removal operation altoghether
    self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET

  def DeclareLocks(self, level):
    """Last minute lock declaration."""
    # All nodes are locked anyway, so nothing to do here.

  def BuildHooksEnv(self):
    """Build hooks env.

    This will run on the master, primary node and target node.

    """
    env = {
      "EXPORT_NODE": self.op.target_node,
      "EXPORT_DO_SHUTDOWN": self.op.shutdown,
      }
    env.update(_BuildInstanceHookEnvByObject(self, self.instance))
    nl = [self.cfg.GetMasterNode(), self.instance.primary_node,
          self.op.target_node]
    return env, nl, nl

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance and node names are valid.

    """
    instance_name = self.op.instance_name
    self.instance = self.cfg.GetInstanceInfo(instance_name)
    assert self.instance is not None, \
          "Cannot retrieve locked instance %s" % self.op.instance_name

    self.dst_node = self.cfg.GetNodeInfo(
      self.cfg.ExpandNodeName(self.op.target_node))

    assert self.dst_node is not None, \
          "Cannot retrieve locked node %s" % self.op.target_node

    # instance disk type verification
    for disk in self.instance.disks:
      if disk.dev_type == constants.LD_FILE:
        raise errors.OpPrereqError("Export not supported for instances with"
                                   " file-based disks")

  def Exec(self, feedback_fn):
    """Export an instance to an image in the cluster.

    """
    instance = self.instance
    dst_node = self.dst_node
    src_node = instance.primary_node
    if self.op.shutdown:
      # shutdown the instance, but not the disks
      if not self.rpc.call_instance_shutdown(src_node, instance):
        raise errors.OpExecError("Could not shutdown instance %s on node %s" %
                                 (instance.name, src_node))

    vgname = self.cfg.GetVGName()

    # one entry per instance disk: either a new Disk object describing
    # the LVM snapshot, or False if snapshotting that disk failed
    snap_disks = []

    try:
      for disk in instance.disks:
        # new_dev_name will be a snapshot of an lvm leaf of the one we passed
        new_dev_name = self.rpc.call_blockdev_snapshot(src_node, disk)

        if not new_dev_name:
          # snapshot failure is not fatal; record it and continue with
          # the remaining disks
          self.LogWarning("Could not snapshot block device %s on node %s",
                          disk.logical_id[1], src_node)
          snap_disks.append(False)
        else:
          new_dev = objects.Disk(dev_type=constants.LD_LV, size=disk.size,
                                 logical_id=(vgname, new_dev_name),
                                 physical_id=(vgname, new_dev_name),
                                 iv_name=disk.iv_name)
          snap_disks.append(new_dev)

    finally:
      # restart the instance as soon as the snapshots are taken (or on
      # error), but only if we shut it down ourselves and the config
      # says it should be running
      if self.op.shutdown and instance.status == "up":
        if not self.rpc.call_instance_start(src_node, instance, None):
          _ShutdownInstanceDisks(self, instance)
          raise errors.OpExecError("Could not start instance")

    # TODO: check for size

    cluster_name = self.cfg.GetClusterName()
    for idx, dev in enumerate(snap_disks):
      if dev:
        # copy the snapshot to the target node, then drop the snapshot
        # from the source node in any case; both failures are non-fatal
        if not self.rpc.call_snapshot_export(src_node, dev, dst_node.name,
                                             instance, cluster_name, idx):
          self.LogWarning("Could not export block device %s from node %s to"
                          " node %s", dev.logical_id[1], src_node,
                          dst_node.name)
        if not self.rpc.call_blockdev_remove(src_node, dev):
          self.LogWarning("Could not remove snapshot block device %s from node"
                          " %s", dev.logical_id[1], src_node)

    if not self.rpc.call_finalize_export(dst_node.name, instance, snap_disks):
      self.LogWarning("Could not finalize export for instance %s on node %s",
                      instance.name, dst_node.name)

    nodelist = self.cfg.GetNodeList()
    nodelist.remove(dst_node.name)

    # on one-node clusters nodelist will be empty after the removal
    # if we proceed the backup would be removed because OpQueryExports
    # substitutes an empty list with the full cluster node list.
    if nodelist:
      exportlist = self.rpc.call_export_list(nodelist)
      for node in exportlist:
        if instance.name in exportlist[node]:
          if not self.rpc.call_export_remove(node, instance.name):
            self.LogWarning("Could not remove older export for instance %s"
                            " on node %s", instance.name, node)
class LURemoveExport(NoHooksLU):
  """Remove exports related to the named instance.

  """
  _OP_REQP = ["instance_name"]
  REQ_BGL = False

  def ExpandNames(self):
    self.needed_locks = {}
    # We need all nodes to be locked in order for RemoveExport to work, but we
    # don't need to lock the instance itself, as nothing will happen to it (and
    # we can remove exports also for a removed instance)
    self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET

  def CheckPrereq(self):
    """Check prerequisites.
    """
    pass

  def Exec(self, feedback_fn):
    """Remove any export.

    """
    instance_name = self.cfg.ExpandInstanceName(self.op.instance_name)
    # If the instance was not found we'll try with the name that was passed in.
    # This will only work if it was an FQDN, though.
    fqdn_warn = instance_name is None or not instance_name
    if fqdn_warn:
      instance_name = self.op.instance_name

    locked_nodes = self.acquired_locks[locking.LEVEL_NODE]
    exportlist = self.rpc.call_export_list(locked_nodes)
    found = False
    for node in exportlist:
      if instance_name not in exportlist[node]:
        continue
      found = True
      if not self.rpc.call_export_remove(node, instance_name):
        logging.error("Could not remove export for instance %s"
                      " on node %s", instance_name, node)

    if fqdn_warn and not found:
      feedback_fn("Export not found. If trying to remove an export belonging"
                  " to a deleted instance please use its Fully Qualified"
                  " Domain Name.")
class TagsLU(NoHooksLU):
  """Generic tags LU.

  This is an abstract class which is the parent of all the other tags LUs.

  """

  def ExpandNames(self):
    # cluster-level tag operations need no locks; node/instance ones
    # expand the short name and lock the corresponding object
    self.needed_locks = {}
    kind = self.op.kind
    if kind == constants.TAG_NODE:
      name = self.cfg.ExpandNodeName(self.op.name)
      if name is None:
        raise errors.OpPrereqError("Invalid node name (%s)" %
                                   (self.op.name,))
      self.op.name = name
      self.needed_locks[locking.LEVEL_NODE] = name
    elif kind == constants.TAG_INSTANCE:
      name = self.cfg.ExpandInstanceName(self.op.name)
      if name is None:
        raise errors.OpPrereqError("Invalid instance name (%s)" %
                                   (self.op.name,))
      self.op.name = name
      self.needed_locks[locking.LEVEL_INSTANCE] = name

  def CheckPrereq(self):
    """Check prerequisites.

    """
    # resolve the tag target object from the (kind, name) pair
    kind = self.op.kind
    if kind == constants.TAG_CLUSTER:
      target = self.cfg.GetClusterInfo()
    elif kind == constants.TAG_NODE:
      target = self.cfg.GetNodeInfo(self.op.name)
    elif kind == constants.TAG_INSTANCE:
      target = self.cfg.GetInstanceInfo(self.op.name)
    else:
      raise errors.OpPrereqError("Wrong tag type requested (%s)" %
                                 str(self.op.kind))
    self.target = target
class LUGetTags(TagsLU):
  """Returns the tags of a given object.

  """
  _OP_REQP = ["kind", "name"]
  REQ_BGL = False

  def Exec(self, feedback_fn):
    """Returns the tag list.

    """
    # copy into a fresh list so callers cannot mutate the config object
    return [tag for tag in self.target.GetTags()]
class LUSearchTags(NoHooksLU):
5128 73415719 Iustin Pop
  """Searches the tags for a given pattern.
5129 73415719 Iustin Pop

5130 73415719 Iustin Pop
  """
5131 73415719 Iustin Pop
  _OP_REQP = ["pattern"]
5132 8646adce Guido Trotter
  REQ_BGL = False
5133 8646adce Guido Trotter
5134 8646adce Guido Trotter
  def ExpandNames(self):
5135 8646adce Guido Trotter
    self.needed_locks = {}
5136 73415719 Iustin Pop
5137 73415719 Iustin Pop
  def CheckPrereq(self):
5138 73415719 Iustin Pop
    """Check prerequisites.
5139 73415719 Iustin Pop

5140 73415719 Iustin Pop
    This checks the pattern passed for validity by compiling it.
5141 73415719 Iustin Pop

5142 73415719 Iustin Pop
    """
5143 73415719 Iustin Pop
    try:
5144 73415719 Iustin Pop
      self.re = re.compile(self.op.pattern)
5145 73415719 Iustin Pop
    except re.error, err:
5146 73415719 Iustin Pop
      raise errors.OpPrereqError("Invalid search pattern '%s': %s" %
5147 73415719 Iustin Pop
                                 (self.op.pattern, err))
5148 73415719 Iustin Pop
5149 73415719 Iustin Pop
  def Exec(self, feedback_fn):
5150 73415719 Iustin Pop
    """Returns the tag list.
5151 73415719 Iustin Pop

5152 73415719 Iustin Pop
    """
5153 73415719 Iustin Pop
    cfg = self.cfg
5154 73415719 Iustin Pop
    tgts = [("/cluster", cfg.GetClusterInfo())]
5155 8646adce Guido Trotter
    ilist = cfg.GetAllInstancesInfo().values()
5156 73415719 Iustin Pop
    tgts.extend([("/instances/%s" % i.name, i) for i in ilist])
5157 8646adce Guido Trotter
    nlist = cfg.GetAllNodesInfo().values()
5158 73415719 Iustin Pop
    tgts.extend([("/nodes/%s" % n.name, n) for n in nlist])
5159 73415719 Iustin Pop
    results = []
5160 73415719 Iustin Pop
    for path, target in tgts:
5161 73415719 Iustin Pop
      for tag in target.GetTags():
5162 73415719 Iustin Pop
        if self.re.search(tag):
5163 73415719 Iustin Pop
          results.append((path, tag))
5164 73415719 Iustin Pop
    return results
5165 73415719 Iustin Pop
5166 73415719 Iustin Pop
5167 f27302fa Iustin Pop
class LUAddTags(TagsLU):
5168 5c947f38 Iustin Pop
  """Sets a tag on a given object.
5169 5c947f38 Iustin Pop

5170 5c947f38 Iustin Pop
  """
5171 f27302fa Iustin Pop
  _OP_REQP = ["kind", "name", "tags"]
5172 8646adce Guido Trotter
  REQ_BGL = False
5173 5c947f38 Iustin Pop
5174 5c947f38 Iustin Pop
  def CheckPrereq(self):
5175 5c947f38 Iustin Pop
    """Check prerequisites.
5176 5c947f38 Iustin Pop

5177 5c947f38 Iustin Pop
    This checks the type and length of the tag name and value.
5178 5c947f38 Iustin Pop

5179 5c947f38 Iustin Pop
    """
5180 5c947f38 Iustin Pop
    TagsLU.CheckPrereq(self)
5181 f27302fa Iustin Pop
    for tag in self.op.tags:
5182 f27302fa Iustin Pop
      objects.TaggableObject.ValidateTag(tag)
5183 5c947f38 Iustin Pop
5184 5c947f38 Iustin Pop
  def Exec(self, feedback_fn):
5185 5c947f38 Iustin Pop
    """Sets the tag.
5186 5c947f38 Iustin Pop

5187 5c947f38 Iustin Pop
    """
5188 5c947f38 Iustin Pop
    try:
5189 f27302fa Iustin Pop
      for tag in self.op.tags:
5190 f27302fa Iustin Pop
        self.target.AddTag(tag)
5191 5c947f38 Iustin Pop
    except errors.TagError, err:
5192 3ecf6786 Iustin Pop
      raise errors.OpExecError("Error while setting tag: %s" % str(err))
5193 5c947f38 Iustin Pop
    try:
5194 5c947f38 Iustin Pop
      self.cfg.Update(self.target)
5195 5c947f38 Iustin Pop
    except errors.ConfigurationError:
5196 3ecf6786 Iustin Pop
      raise errors.OpRetryError("There has been a modification to the"
5197 3ecf6786 Iustin Pop
                                " config file and the operation has been"
5198 3ecf6786 Iustin Pop
                                " aborted. Please retry.")
5199 5c947f38 Iustin Pop
5200 5c947f38 Iustin Pop
5201 f27302fa Iustin Pop
class LUDelTags(TagsLU):
  """Delete a list of tags from a given object.

  """
  _OP_REQP = ["kind", "name", "tags"]
  REQ_BGL = False

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that we have the given tag.

    """
    TagsLU.CheckPrereq(self)
    for del_tag in self.op.tags:
      objects.TaggableObject.ValidateTag(del_tag)
    # every tag to delete must currently be present on the target
    wanted = frozenset(self.op.tags)
    present = self.target.GetTags()
    missing = wanted - present
    if missing:
      names = sorted(["'%s'" % tag for tag in missing])
      raise errors.OpPrereqError("Tag(s) %s not found" %
                                 (",".join(names)))

  def Exec(self, feedback_fn):
    """Remove the tag from the object.

    """
    target = self.target
    for del_tag in self.op.tags:
      target.RemoveTag(del_tag)
    # persist the modified object; a concurrent config change means the
    # whole operation has to be retried by the caller
    try:
      self.cfg.Update(target)
    except errors.ConfigurationError:
      raise errors.OpRetryError("There has been a modification to the"
                                " config file and the operation has been"
                                " aborted. Please retry.")
5238 06009e27 Iustin Pop
5239 0eed6e61 Guido Trotter
5240 06009e27 Iustin Pop
class LUTestDelay(NoHooksLU):
  """Sleep for a specified amount of time.

  This LU sleeps on the master and/or nodes for a specified amount of
  time.

  """
  _OP_REQP = ["duration", "on_master", "on_nodes"]
  REQ_BGL = False

  def ExpandNames(self):
    """Expand names and set required locks.

    This expands the node list, if any.

    """
    self.needed_locks = {}
    if not self.op.on_nodes:
      return
    # _GetWantedNodes can be used here, but is not always appropriate to use
    # this way in ExpandNames. Check LogicalUnit.ExpandNames docstring for
    # more information.
    self.op.on_nodes = _GetWantedNodes(self, self.op.on_nodes)
    self.needed_locks[locking.LEVEL_NODE] = self.op.on_nodes

  def CheckPrereq(self):
    """Check prerequisites.

    """

  def Exec(self, feedback_fn):
    """Do the actual sleep.

    """
    duration = self.op.duration
    # local delay on the master node, if requested
    if self.op.on_master:
      if not utils.TestDelay(duration):
        raise errors.OpExecError("Error during master delay test")
    # remote delay on the listed nodes, if requested
    if self.op.on_nodes:
      result = self.rpc.call_test_delay(self.op.on_nodes, duration)
      if not result:
        raise errors.OpExecError("Complete failure from rpc call")
      for node, node_result in result.items():
        if not node_result:
          raise errors.OpExecError("Failure during rpc call to node %s,"
                                   " result: %s" % (node, node_result))
5284 d61df03e Iustin Pop
5285 d61df03e Iustin Pop
5286 d1c2dd75 Iustin Pop
class IAllocator(object):
5287 d1c2dd75 Iustin Pop
  """IAllocator framework.
5288 d61df03e Iustin Pop

5289 d1c2dd75 Iustin Pop
  An IAllocator instance has three sets of attributes:
5290 d6a02168 Michael Hanselmann
    - cfg that is needed to query the cluster
5291 d1c2dd75 Iustin Pop
    - input data (all members of the _KEYS class attribute are required)
5292 d1c2dd75 Iustin Pop
    - four buffer attributes (in|out_data|text), that represent the
5293 d1c2dd75 Iustin Pop
      input (to the external script) in text and data structure format,
5294 d1c2dd75 Iustin Pop
      and the output from it, again in two formats
5295 d1c2dd75 Iustin Pop
    - the result variables from the script (success, info, nodes) for
5296 d1c2dd75 Iustin Pop
      easy usage
5297 d61df03e Iustin Pop

5298 d61df03e Iustin Pop
  """
5299 29859cb7 Iustin Pop
  _ALLO_KEYS = [
5300 d1c2dd75 Iustin Pop
    "mem_size", "disks", "disk_template",
5301 d1c2dd75 Iustin Pop
    "os", "tags", "nics", "vcpus",
5302 d1c2dd75 Iustin Pop
    ]
5303 29859cb7 Iustin Pop
  _RELO_KEYS = [
5304 29859cb7 Iustin Pop
    "relocate_from",
5305 29859cb7 Iustin Pop
    ]
5306 d1c2dd75 Iustin Pop
5307 72737a7f Iustin Pop
  def __init__(self, lu, mode, name, **kwargs):
5308 72737a7f Iustin Pop
    self.lu = lu
5309 d1c2dd75 Iustin Pop
    # init buffer variables
5310 d1c2dd75 Iustin Pop
    self.in_text = self.out_text = self.in_data = self.out_data = None
5311 d1c2dd75 Iustin Pop
    # init all input fields so that pylint is happy
5312 29859cb7 Iustin Pop
    self.mode = mode
5313 29859cb7 Iustin Pop
    self.name = name
5314 d1c2dd75 Iustin Pop
    self.mem_size = self.disks = self.disk_template = None
5315 d1c2dd75 Iustin Pop
    self.os = self.tags = self.nics = self.vcpus = None
5316 29859cb7 Iustin Pop
    self.relocate_from = None
5317 27579978 Iustin Pop
    # computed fields
5318 27579978 Iustin Pop
    self.required_nodes = None
5319 d1c2dd75 Iustin Pop
    # init result fields
5320 d1c2dd75 Iustin Pop
    self.success = self.info = self.nodes = None
5321 29859cb7 Iustin Pop
    if self.mode == constants.IALLOCATOR_MODE_ALLOC:
5322 29859cb7 Iustin Pop
      keyset = self._ALLO_KEYS
5323 29859cb7 Iustin Pop
    elif self.mode == constants.IALLOCATOR_MODE_RELOC:
5324 29859cb7 Iustin Pop
      keyset = self._RELO_KEYS
5325 29859cb7 Iustin Pop
    else:
5326 29859cb7 Iustin Pop
      raise errors.ProgrammerError("Unknown mode '%s' passed to the"
5327 29859cb7 Iustin Pop
                                   " IAllocator" % self.mode)
5328 d1c2dd75 Iustin Pop
    for key in kwargs:
5329 29859cb7 Iustin Pop
      if key not in keyset:
5330 d1c2dd75 Iustin Pop
        raise errors.ProgrammerError("Invalid input parameter '%s' to"
5331 d1c2dd75 Iustin Pop
                                     " IAllocator" % key)
5332 d1c2dd75 Iustin Pop
      setattr(self, key, kwargs[key])
5333 29859cb7 Iustin Pop
    for key in keyset:
5334 d1c2dd75 Iustin Pop
      if key not in kwargs:
5335 d1c2dd75 Iustin Pop
        raise errors.ProgrammerError("Missing input parameter '%s' to"
5336 d1c2dd75 Iustin Pop
                                     " IAllocator" % key)
5337 d1c2dd75 Iustin Pop
    self._BuildInputData()
5338 d1c2dd75 Iustin Pop
5339 d1c2dd75 Iustin Pop
  def _ComputeClusterData(self):
5340 d1c2dd75 Iustin Pop
    """Compute the generic allocator input data.
5341 d1c2dd75 Iustin Pop

5342 d1c2dd75 Iustin Pop
    This is the data that is independent of the actual operation.
5343 d1c2dd75 Iustin Pop

5344 d1c2dd75 Iustin Pop
    """
5345 72737a7f Iustin Pop
    cfg = self.lu.cfg
5346 e69d05fd Iustin Pop
    cluster_info = cfg.GetClusterInfo()
5347 d1c2dd75 Iustin Pop
    # cluster data
5348 d1c2dd75 Iustin Pop
    data = {
5349 d1c2dd75 Iustin Pop
      "version": 1,
5350 72737a7f Iustin Pop
      "cluster_name": cfg.GetClusterName(),
5351 e69d05fd Iustin Pop
      "cluster_tags": list(cluster_info.GetTags()),
5352 e69d05fd Iustin Pop
      "enable_hypervisors": list(cluster_info.enabled_hypervisors),
5353 d1c2dd75 Iustin Pop
      # we don't have job IDs
5354 d61df03e Iustin Pop
      }
5355 d61df03e Iustin Pop
5356 338e51e8 Iustin Pop
    i_list = []
5357 338e51e8 Iustin Pop
    cluster = self.cfg.GetClusterInfo()
5358 338e51e8 Iustin Pop
    for iname in cfg.GetInstanceList():
5359 338e51e8 Iustin Pop
      i_obj = cfg.GetInstanceInfo(iname)
5360 338e51e8 Iustin Pop
      i_list.append((i_obj, cluster.FillBE(i_obj)))
5361 6286519f Iustin Pop
5362 d1c2dd75 Iustin Pop
    # node data
5363 d1c2dd75 Iustin Pop
    node_results = {}
5364 d1c2dd75 Iustin Pop
    node_list = cfg.GetNodeList()
5365 e69d05fd Iustin Pop
    # FIXME: here we have only one hypervisor information, but
5366 e69d05fd Iustin Pop
    # instance can belong to different hypervisors
5367 72737a7f Iustin Pop
    node_data = self.lu.rpc.call_node_info(node_list, cfg.GetVGName(),
5368 72737a7f Iustin Pop
                                           cfg.GetHypervisorType())
5369 d1c2dd75 Iustin Pop
    for nname in node_list:
5370 d1c2dd75 Iustin Pop
      ninfo = cfg.GetNodeInfo(nname)
5371 d1c2dd75 Iustin Pop
      if nname not in node_data or not isinstance(node_data[nname], dict):
5372 d1c2dd75 Iustin Pop
        raise errors.OpExecError("Can't get data for node %s" % nname)
5373 d1c2dd75 Iustin Pop
      remote_info = node_data[nname]
5374 b2662e7f Iustin Pop
      for attr in ['memory_total', 'memory_free', 'memory_dom0',
5375 4337cf1b Iustin Pop
                   'vg_size', 'vg_free', 'cpu_total']:
5376 d1c2dd75 Iustin Pop
        if attr not in remote_info:
5377 d1c2dd75 Iustin Pop
          raise errors.OpExecError("Node '%s' didn't return attribute '%s'" %
5378 d1c2dd75 Iustin Pop
                                   (nname, attr))
5379 d1c2dd75 Iustin Pop
        try:
5380 b2662e7f Iustin Pop
          remote_info[attr] = int(remote_info[attr])
5381 d1c2dd75 Iustin Pop
        except ValueError, err:
5382 d1c2dd75 Iustin Pop
          raise errors.OpExecError("Node '%s' returned invalid value for '%s':"
5383 d1c2dd75 Iustin Pop
                                   " %s" % (nname, attr, str(err)))
5384 6286519f Iustin Pop
      # compute memory used by primary instances
5385 6286519f Iustin Pop
      i_p_mem = i_p_up_mem = 0
5386 338e51e8 Iustin Pop
      for iinfo, beinfo in i_list:
5387 6286519f Iustin Pop
        if iinfo.primary_node == nname:
5388 338e51e8 Iustin Pop
          i_p_mem += beinfo[constants.BE_MEMORY]
5389 6286519f Iustin Pop
          if iinfo.status == "up":
5390 338e51e8 Iustin Pop
            i_p_up_mem += beinfo[constants.BE_MEMORY]
5391 6286519f Iustin Pop
5392 b2662e7f Iustin Pop
      # compute memory used by instances
5393 d1c2dd75 Iustin Pop
      pnr = {
5394 d1c2dd75 Iustin Pop
        "tags": list(ninfo.GetTags()),
5395 b2662e7f Iustin Pop
        "total_memory": remote_info['memory_total'],
5396 b2662e7f Iustin Pop
        "reserved_memory": remote_info['memory_dom0'],
5397 b2662e7f Iustin Pop
        "free_memory": remote_info['memory_free'],
5398 6286519f Iustin Pop
        "i_pri_memory": i_p_mem,
5399 6286519f Iustin Pop
        "i_pri_up_memory": i_p_up_mem,
5400 b2662e7f Iustin Pop
        "total_disk": remote_info['vg_size'],
5401 b2662e7f Iustin Pop
        "free_disk": remote_info['vg_free'],
5402 d1c2dd75 Iustin Pop
        "primary_ip": ninfo.primary_ip,
5403 d1c2dd75 Iustin Pop
        "secondary_ip": ninfo.secondary_ip,
5404 4337cf1b Iustin Pop
        "total_cpus": remote_info['cpu_total'],
5405 d1c2dd75 Iustin Pop
        }
5406 d1c2dd75 Iustin Pop
      node_results[nname] = pnr
5407 d1c2dd75 Iustin Pop
    data["nodes"] = node_results
5408 d1c2dd75 Iustin Pop
5409 d1c2dd75 Iustin Pop
    # instance data
5410 d1c2dd75 Iustin Pop
    instance_data = {}
5411 338e51e8 Iustin Pop
    for iinfo, beinfo in i_list:
5412 d1c2dd75 Iustin Pop
      nic_data = [{"mac": n.mac, "ip": n.ip, "bridge": n.bridge}
5413 d1c2dd75 Iustin Pop
                  for n in iinfo.nics]
5414 d1c2dd75 Iustin Pop
      pir = {
5415 d1c2dd75 Iustin Pop
        "tags": list(iinfo.GetTags()),
5416 d1c2dd75 Iustin Pop
        "should_run": iinfo.status == "up",
5417 338e51e8 Iustin Pop
        "vcpus": beinfo[constants.BE_VCPUS],
5418 338e51e8 Iustin Pop
        "memory": beinfo[constants.BE_MEMORY],
5419 d1c2dd75 Iustin Pop
        "os": iinfo.os,
5420 d1c2dd75 Iustin Pop
        "nodes": [iinfo.primary_node] + list(iinfo.secondary_nodes),
5421 d1c2dd75 Iustin Pop
        "nics": nic_data,
5422 d1c2dd75 Iustin Pop
        "disks": [{"size": dsk.size, "mode": "w"} for dsk in iinfo.disks],
5423 d1c2dd75 Iustin Pop
        "disk_template": iinfo.disk_template,
5424 e69d05fd Iustin Pop
        "hypervisor": iinfo.hypervisor,
5425 d1c2dd75 Iustin Pop
        }
5426 768f0a80 Iustin Pop
      instance_data[iinfo.name] = pir
5427 d61df03e Iustin Pop
5428 d1c2dd75 Iustin Pop
    data["instances"] = instance_data
5429 d61df03e Iustin Pop
5430 d1c2dd75 Iustin Pop
    self.in_data = data
5431 d61df03e Iustin Pop
5432 d1c2dd75 Iustin Pop
  def _AddNewInstance(self):
5433 d1c2dd75 Iustin Pop
    """Add new instance data to allocator structure.
5434 d61df03e Iustin Pop

5435 d1c2dd75 Iustin Pop
    This in combination with _AllocatorGetClusterData will create the
5436 d1c2dd75 Iustin Pop
    correct structure needed as input for the allocator.
5437 d61df03e Iustin Pop

5438 d1c2dd75 Iustin Pop
    The checks for the completeness of the opcode must have already been
5439 d1c2dd75 Iustin Pop
    done.
5440 d61df03e Iustin Pop

5441 d1c2dd75 Iustin Pop
    """
5442 d1c2dd75 Iustin Pop
    data = self.in_data
5443 d1c2dd75 Iustin Pop
    if len(self.disks) != 2:
5444 d1c2dd75 Iustin Pop
      raise errors.OpExecError("Only two-disk configurations supported")
5445 d1c2dd75 Iustin Pop
5446 d1c2dd75 Iustin Pop
    disk_space = _ComputeDiskSize(self.disk_template,
5447 d1c2dd75 Iustin Pop
                                  self.disks[0]["size"], self.disks[1]["size"])
5448 d1c2dd75 Iustin Pop
5449 27579978 Iustin Pop
    if self.disk_template in constants.DTS_NET_MIRROR:
5450 27579978 Iustin Pop
      self.required_nodes = 2
5451 27579978 Iustin Pop
    else:
5452 27579978 Iustin Pop
      self.required_nodes = 1
5453 d1c2dd75 Iustin Pop
    request = {
5454 d1c2dd75 Iustin Pop
      "type": "allocate",
5455 d1c2dd75 Iustin Pop
      "name": self.name,
5456 d1c2dd75 Iustin Pop
      "disk_template": self.disk_template,
5457 d1c2dd75 Iustin Pop
      "tags": self.tags,
5458 d1c2dd75 Iustin Pop
      "os": self.os,
5459 d1c2dd75 Iustin Pop
      "vcpus": self.vcpus,
5460 d1c2dd75 Iustin Pop
      "memory": self.mem_size,
5461 d1c2dd75 Iustin Pop
      "disks": self.disks,
5462 d1c2dd75 Iustin Pop
      "disk_space_total": disk_space,
5463 d1c2dd75 Iustin Pop
      "nics": self.nics,
5464 27579978 Iustin Pop
      "required_nodes": self.required_nodes,
5465 d1c2dd75 Iustin Pop
      }
5466 d1c2dd75 Iustin Pop
    data["request"] = request
5467 298fe380 Iustin Pop
5468 d1c2dd75 Iustin Pop
  def _AddRelocateInstance(self):
5469 d1c2dd75 Iustin Pop
    """Add relocate instance data to allocator structure.
5470 298fe380 Iustin Pop

5471 d1c2dd75 Iustin Pop
    This in combination with _IAllocatorGetClusterData will create the
5472 d1c2dd75 Iustin Pop
    correct structure needed as input for the allocator.
5473 d61df03e Iustin Pop

5474 d1c2dd75 Iustin Pop
    The checks for the completeness of the opcode must have already been
5475 d1c2dd75 Iustin Pop
    done.
5476 d61df03e Iustin Pop

5477 d1c2dd75 Iustin Pop
    """
5478 72737a7f Iustin Pop
    instance = self.lu.cfg.GetInstanceInfo(self.name)
5479 27579978 Iustin Pop
    if instance is None:
5480 27579978 Iustin Pop
      raise errors.ProgrammerError("Unknown instance '%s' passed to"
5481 27579978 Iustin Pop
                                   " IAllocator" % self.name)
5482 27579978 Iustin Pop
5483 27579978 Iustin Pop
    if instance.disk_template not in constants.DTS_NET_MIRROR:
5484 27579978 Iustin Pop
      raise errors.OpPrereqError("Can't relocate non-mirrored instances")
5485 27579978 Iustin Pop
5486 2a139bb0 Iustin Pop
    if len(instance.secondary_nodes) != 1:
5487 2a139bb0 Iustin Pop
      raise errors.OpPrereqError("Instance has not exactly one secondary node")
5488 2a139bb0 Iustin Pop
5489 27579978 Iustin Pop
    self.required_nodes = 1
5490 27579978 Iustin Pop
5491 27579978 Iustin Pop
    disk_space = _ComputeDiskSize(instance.disk_template,
5492 27579978 Iustin Pop
                                  instance.disks[0].size,
5493 27579978 Iustin Pop
                                  instance.disks[1].size)
5494 27579978 Iustin Pop
5495 d1c2dd75 Iustin Pop
    request = {
5496 2a139bb0 Iustin Pop
      "type": "relocate",
5497 d1c2dd75 Iustin Pop
      "name": self.name,
5498 27579978 Iustin Pop
      "disk_space_total": disk_space,
5499 27579978 Iustin Pop
      "required_nodes": self.required_nodes,
5500 29859cb7 Iustin Pop
      "relocate_from": self.relocate_from,
5501 d1c2dd75 Iustin Pop
      }
5502 27579978 Iustin Pop
    self.in_data["request"] = request
5503 d61df03e Iustin Pop
5504 d1c2dd75 Iustin Pop
  def _BuildInputData(self):
5505 d1c2dd75 Iustin Pop
    """Build input data structures.
5506 d61df03e Iustin Pop

5507 d1c2dd75 Iustin Pop
    """
5508 d1c2dd75 Iustin Pop
    self._ComputeClusterData()
5509 d61df03e Iustin Pop
5510 d1c2dd75 Iustin Pop
    if self.mode == constants.IALLOCATOR_MODE_ALLOC:
5511 d1c2dd75 Iustin Pop
      self._AddNewInstance()
5512 d1c2dd75 Iustin Pop
    else:
5513 d1c2dd75 Iustin Pop
      self._AddRelocateInstance()
5514 d61df03e Iustin Pop
5515 d1c2dd75 Iustin Pop
    self.in_text = serializer.Dump(self.in_data)
5516 d61df03e Iustin Pop
5517 72737a7f Iustin Pop
  def Run(self, name, validate=True, call_fn=None):
5518 d1c2dd75 Iustin Pop
    """Run an instance allocator and return the results.
5519 298fe380 Iustin Pop

5520 d1c2dd75 Iustin Pop
    """
5521 72737a7f Iustin Pop
    if call_fn is None:
5522 72737a7f Iustin Pop
      call_fn = self.lu.rpc.call_iallocator_runner
5523 d1c2dd75 Iustin Pop
    data = self.in_text
5524 298fe380 Iustin Pop
5525 72737a7f Iustin Pop
    result = call_fn(self.lu.cfg.GetMasterNode(), name, self.in_text)
5526 298fe380 Iustin Pop
5527 43f5ea7a Guido Trotter
    if not isinstance(result, (list, tuple)) or len(result) != 4:
5528 8d528b7c Iustin Pop
      raise errors.OpExecError("Invalid result from master iallocator runner")
5529 8d528b7c Iustin Pop
5530 8d528b7c Iustin Pop
    rcode, stdout, stderr, fail = result
5531 8d528b7c Iustin Pop
5532 8d528b7c Iustin Pop
    if rcode == constants.IARUN_NOTFOUND:
5533 8d528b7c Iustin Pop
      raise errors.OpExecError("Can't find allocator '%s'" % name)
5534 8d528b7c Iustin Pop
    elif rcode == constants.IARUN_FAILURE:
5535 38206f3c Iustin Pop
      raise errors.OpExecError("Instance allocator call failed: %s,"
5536 38206f3c Iustin Pop
                               " output: %s" % (fail, stdout+stderr))
5537 8d528b7c Iustin Pop
    self.out_text = stdout
5538 d1c2dd75 Iustin Pop
    if validate:
5539 d1c2dd75 Iustin Pop
      self._ValidateResult()
5540 298fe380 Iustin Pop
5541 d1c2dd75 Iustin Pop
  def _ValidateResult(self):
5542 d1c2dd75 Iustin Pop
    """Process the allocator results.
5543 538475ca Iustin Pop

5544 d1c2dd75 Iustin Pop
    This will process and if successful save the result in
5545 d1c2dd75 Iustin Pop
    self.out_data and the other parameters.
5546 538475ca Iustin Pop

5547 d1c2dd75 Iustin Pop
    """
5548 d1c2dd75 Iustin Pop
    try:
5549 d1c2dd75 Iustin Pop
      rdict = serializer.Load(self.out_text)
5550 d1c2dd75 Iustin Pop
    except Exception, err:
5551 d1c2dd75 Iustin Pop
      raise errors.OpExecError("Can't parse iallocator results: %s" % str(err))
5552 d1c2dd75 Iustin Pop
5553 d1c2dd75 Iustin Pop
    if not isinstance(rdict, dict):
5554 d1c2dd75 Iustin Pop
      raise errors.OpExecError("Can't parse iallocator results: not a dict")
5555 538475ca Iustin Pop
5556 d1c2dd75 Iustin Pop
    for key in "success", "info", "nodes":
5557 d1c2dd75 Iustin Pop
      if key not in rdict:
5558 d1c2dd75 Iustin Pop
        raise errors.OpExecError("Can't parse iallocator results:"
5559 d1c2dd75 Iustin Pop
                                 " missing key '%s'" % key)
5560 d1c2dd75 Iustin Pop
      setattr(self, key, rdict[key])
5561 538475ca Iustin Pop
5562 d1c2dd75 Iustin Pop
    if not isinstance(rdict["nodes"], list):
5563 d1c2dd75 Iustin Pop
      raise errors.OpExecError("Can't parse iallocator results: 'nodes' key"
5564 d1c2dd75 Iustin Pop
                               " is not a list")
5565 d1c2dd75 Iustin Pop
    self.out_data = rdict
5566 538475ca Iustin Pop
5567 538475ca Iustin Pop
5568 d61df03e Iustin Pop
class LUTestAllocator(NoHooksLU):
  """Run allocator tests.

  This LU runs the allocator tests

  """
  _OP_REQP = ["direction", "mode", "name"]

  def CheckPrereq(self):
    """Check prerequisites.

    This checks the opcode parameters depending on the direction and mode test.

    """
    if self.op.mode == constants.IALLOCATOR_MODE_ALLOC:
      # allocation mode: all instance-creation attributes must be present
      for attr in ["name", "mem_size", "disks", "disk_template",
                   "os", "tags", "nics", "vcpus"]:
        if not hasattr(self.op, attr):
          raise errors.OpPrereqError("Missing attribute '%s' on opcode input" %
                                     attr)
      # the name must not resolve to an existing instance
      iname = self.cfg.ExpandInstanceName(self.op.name)
      if iname is not None:
        raise errors.OpPrereqError("Instance '%s' already in the cluster" %
                                   iname)
      if not isinstance(self.op.nics, list):
        raise errors.OpPrereqError("Invalid parameter 'nics'")
      # each NIC must be a dict carrying mac, ip and bridge
      for row in self.op.nics:
        if (not isinstance(row, dict) or
            "mac" not in row or
            "ip" not in row or
            "bridge" not in row):
          raise errors.OpPrereqError("Invalid contents of the"
                                     " 'nics' parameter")
      if not isinstance(self.op.disks, list):
        raise errors.OpPrereqError("Invalid parameter 'disks'")
      if len(self.op.disks) != 2:
        raise errors.OpPrereqError("Only two-disk configurations supported")
      # each disk must be a dict with an integer size and an 'r'/'w' mode
      for row in self.op.disks:
        if (not isinstance(row, dict) or
            "size" not in row or
            not isinstance(row["size"], int) or
            "mode" not in row or
            row["mode"] not in ['r', 'w']):
          raise errors.OpPrereqError("Invalid contents of the"
                                     " 'disks' parameter")
    elif self.op.mode == constants.IALLOCATOR_MODE_RELOC:
      # relocation mode: the instance must exist; its secondary nodes
      # become the relocation source used later in Exec
      if not hasattr(self.op, "name"):
        raise errors.OpPrereqError("Missing attribute 'name' on opcode input")
      fname = self.cfg.ExpandInstanceName(self.op.name)
      if fname is None:
        raise errors.OpPrereqError("Instance '%s' not found for relocation" %
                                   self.op.name)
      self.op.name = fname
      self.relocate_from = self.cfg.GetInstanceInfo(fname).secondary_nodes
    else:
      raise errors.OpPrereqError("Invalid test allocator mode '%s'" %
                                 self.op.mode)

    # only an external run (DIR_OUT) needs an allocator name
    if self.op.direction == constants.IALLOCATOR_DIR_OUT:
      if not hasattr(self.op, "allocator") or self.op.allocator is None:
        raise errors.OpPrereqError("Missing allocator name")
    elif self.op.direction != constants.IALLOCATOR_DIR_IN:
      raise errors.OpPrereqError("Wrong allocator test '%s'" %
                                 self.op.direction)

  def Exec(self, feedback_fn):
    """Run the allocator test.

    """
    # build the IAllocator request matching the mode checked above
    if self.op.mode == constants.IALLOCATOR_MODE_ALLOC:
      ial = IAllocator(self,
                       mode=self.op.mode,
                       name=self.op.name,
                       mem_size=self.op.mem_size,
                       disks=self.op.disks,
                       disk_template=self.op.disk_template,
                       os=self.op.os,
                       tags=self.op.tags,
                       nics=self.op.nics,
                       vcpus=self.op.vcpus,
                       )
    else:
      ial = IAllocator(self,
                       mode=self.op.mode,
                       name=self.op.name,
                       relocate_from=list(self.relocate_from),
                       )

    # DIR_IN returns the generated input text; DIR_OUT actually runs the
    # external allocator (unvalidated) and returns its raw output
    if self.op.direction == constants.IALLOCATOR_DIR_IN:
      result = ial.in_text
    else:
      ial.Run(self.op.allocator, validate=False)
      result = ial.out_text
    return result