Statistics
| Branch: | Tag: | Revision:

root / lib / cmdlib.py @ 1a05d855

History | View | Annotate | Download (191 kB)

1 2f31098c Iustin Pop
#
2 a8083063 Iustin Pop
#
3 a8083063 Iustin Pop
4 e7c6e02b Michael Hanselmann
# Copyright (C) 2006, 2007, 2008 Google Inc.
5 a8083063 Iustin Pop
#
6 a8083063 Iustin Pop
# This program is free software; you can redistribute it and/or modify
7 a8083063 Iustin Pop
# it under the terms of the GNU General Public License as published by
8 a8083063 Iustin Pop
# the Free Software Foundation; either version 2 of the License, or
9 a8083063 Iustin Pop
# (at your option) any later version.
10 a8083063 Iustin Pop
#
11 a8083063 Iustin Pop
# This program is distributed in the hope that it will be useful, but
12 a8083063 Iustin Pop
# WITHOUT ANY WARRANTY; without even the implied warranty of
13 a8083063 Iustin Pop
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
14 a8083063 Iustin Pop
# General Public License for more details.
15 a8083063 Iustin Pop
#
16 a8083063 Iustin Pop
# You should have received a copy of the GNU General Public License
17 a8083063 Iustin Pop
# along with this program; if not, write to the Free Software
18 a8083063 Iustin Pop
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
19 a8083063 Iustin Pop
# 02110-1301, USA.
20 a8083063 Iustin Pop
21 a8083063 Iustin Pop
22 880478f8 Iustin Pop
"""Module implementing the master-side code."""
23 a8083063 Iustin Pop
24 a8083063 Iustin Pop
# pylint: disable-msg=W0613,W0201
25 a8083063 Iustin Pop
26 a8083063 Iustin Pop
import os
27 a8083063 Iustin Pop
import os.path
28 a8083063 Iustin Pop
import sha
29 a8083063 Iustin Pop
import time
30 a8083063 Iustin Pop
import tempfile
31 a8083063 Iustin Pop
import re
32 a8083063 Iustin Pop
import platform
33 ffa1c0dc Iustin Pop
import logging
34 74409b12 Iustin Pop
import copy
35 a8083063 Iustin Pop
36 a8083063 Iustin Pop
from ganeti import ssh
37 a8083063 Iustin Pop
from ganeti import utils
38 a8083063 Iustin Pop
from ganeti import errors
39 a8083063 Iustin Pop
from ganeti import hypervisor
40 6048c986 Guido Trotter
from ganeti import locking
41 a8083063 Iustin Pop
from ganeti import constants
42 a8083063 Iustin Pop
from ganeti import objects
43 a8083063 Iustin Pop
from ganeti import opcodes
44 8d14b30d Iustin Pop
from ganeti import serializer
45 d61df03e Iustin Pop
46 d61df03e Iustin Pop
47 a8083063 Iustin Pop
class LogicalUnit(object):
  """Base class for all Logical Units.

  A Logical Unit (LU) implements one cluster operation. Concrete
  subclasses must:
    - implement ExpandNames
    - implement CheckPrereq
    - implement Exec
    - implement BuildHooksEnv
    - redefine HPATH and HTYPE
    - optionally redefine their run requirements:
        REQ_MASTER: the LU needs to run on the master node
        REQ_BGL: the LU needs to hold the Big Ganeti Lock exclusively

  Note that all commands require root permissions.

  """
  HPATH = None
  HTYPE = None
  _OP_REQP = []
  REQ_MASTER = True
  REQ_BGL = True

  def __init__(self, processor, op, context, rpc):
    """Constructor for LogicalUnit.

    Checks that every parameter named in _OP_REQP is present on the
    opcode, that the cluster is initialized, and (when REQ_MASTER is
    set) that we are running on the master node. Derived classes
    override this to add their own opcode validity checks.

    """
    self.proc = processor
    self.op = op
    self.cfg = context.cfg
    self.context = context
    self.rpc = rpc
    # Dicts used to declare locking needs to mcpu
    self.needed_locks = None
    self.acquired_locks = {}
    self.share_locks = dict.fromkeys(locking.LEVELS, 0)
    self.add_locks = {}
    self.remove_locks = {}
    # Used to force good behavior when calling helper functions
    self.recalculate_locks = {}
    # lazily-built ssh.SshRunner, see the 'ssh' property below
    self.__ssh = None
    # shortcuts to the processor's logging helpers
    self.LogWarning = processor.LogWarning
    self.LogInfo = processor.LogInfo

    for attr_name in self._OP_REQP:
      if getattr(op, attr_name, None) is None:
        raise errors.OpPrereqError("Required parameter '%s' missing" %
                                   attr_name)

    if not self.cfg.IsCluster():
      raise errors.OpPrereqError("Cluster not initialized yet,"
                                 " use 'gnt-cluster init' first.")
    if self.REQ_MASTER:
      master = self.cfg.GetMasterNode()
      if master != utils.HostInfo().name:
        raise errors.OpPrereqError("Commands must be run on the master"
                                   " node %s" % master)

  def __GetSshRunner(self):
    """Return the SshRunner for this LU, creating it on first use.

    """
    if not self.__ssh:
      self.__ssh = ssh.SshRunner(self.cfg.GetClusterName())
    return self.__ssh

  ssh = property(fget=__GetSshRunner)

  def ExpandNames(self):
    """Expand names for this LU.

    Called before the opcode is executed; must bring all the opcode
    parameters to their canonical (fully-expanded) form, e.g. a short
    node name becomes a fully-qualified one, so that locking, hooks and
    logging work on stable names.

    LUs implementing this method must also populate self.needed_locks
    as a dict mapping lock levels to lists of lock names:
      - use an empty dict if no locks are needed at all
      - omit a level entirely if no locks are needed at that level
      - never declare anything for the BGL level
      - use locking.ALL_SET to request every lock at a level

    To acquire locks in shared (rather than exclusive) mode at a level,
    set a true value (usually 1) for that level in self.share_locks; by
    default locks are not shared.

    Examples:
    # Acquire all nodes and one instance
    self.needed_locks = {
      locking.LEVEL_NODE: locking.ALL_SET,
      locking.LEVEL_INSTANCE: ['instance1.example.tld'],
    }
    # Acquire just two nodes
    self.needed_locks = {
      locking.LEVEL_NODE: ['node1.example.tld', 'node2.example.tld'],
    }
    # Acquire no locks
    self.needed_locks = {} # No, you can't leave it to the default value None

    """
    # Implementing this method is mandatory only for concurrent LUs
    # (REQ_BGL == False), so that old LUs don't all need converting at
    # once; LUs holding the BGL exclusively need no other locks.
    if not self.REQ_BGL:
      raise NotImplementedError
    self.needed_locks = {}

  def DeclareLocks(self, level):
    """Declare LU locking needs for a level.

    Most LUs can declare all their locking needs at ExpandNames time,
    but some locks can only be computed after locks at lower levels
    have been acquired. This hook is invoked just before acquiring the
    locks at 'level' (and after the lower levels are held) and may
    adjust self.needed_locks accordingly. The default implementation
    does nothing.

    It is only invoked when self.needed_locks already has an entry for
    the level.

    @param level: Locking level which is going to be locked
    @type level: member of ganeti.locking.LEVELS

    """

  def CheckPrereq(self):
    """Check prerequisites for this LU.

    Verifies that the prerequisites for executing this LU hold. It may
    do internode communication, but must be idempotent - no cluster or
    system changes are allowed. It should also bring the opcode
    parameters to canonical form if ExpandNames has not already done
    so.

    Should raise errors.OpPrereqError when something is not fulfilled;
    the return value is ignored.

    """
    raise NotImplementedError

  def Exec(self, feedback_fn):
    """Execute the LU.

    Performs the actual work of the operation. Failures that are
    somewhat dealt with in code, or expected, should be signalled by
    raising errors.OpExecError.

    """
    raise NotImplementedError

  def BuildHooksEnv(self):
    """Build hooks environment for this LU.

    Returns a three-element tuple: the dict of environment variables
    for this LU's specific hook, the list of node names on which the
    hook runs before execution, and the list of node names on which it
    runs after execution.

    Keys must not carry the 'GANETI_' prefix (the hooks runner adds it,
    along with additional keys of its own). An LU defining no
    environment must return an empty dict (never None), and empty node
    lists must likewise be returned as [] (never None).

    Not called at all when the LU class has HPATH set to None.

    """
    raise NotImplementedError

  def HooksCallBack(self, phase, hook_results, feedback_fn, lu_result):
    """Notify the LU about the results of its hooks.

    Invoked after every hooks phase, letting the Logical Unit inspect
    the hooks' results and, if it wants to make use of the local
    cluster hook-scripts somehow, alter its own result. The default
    implementation returns the previous result unchanged.

    Args:
      phase: the hooks phase that has just been run
      hook_results: the results of the multi-node hooks rpc call
      feedback_fn: function to send feedback back to the caller
      lu_result: the previous result this LU had, or None in the PRE phase.

    """
    return lu_result

  def _ExpandAndLockInstance(self):
    """Helper function to expand and lock an instance.

    Many LUs take an instance name in self.op.instance_name; this
    helper expands it, stores the expanded name back into the opcode
    and declares the instance-level lock for it. needed_locks is
    initialized to a dict if it was still None.

    """
    if self.needed_locks is None:
      self.needed_locks = {}
    else:
      assert locking.LEVEL_INSTANCE not in self.needed_locks, \
        "_ExpandAndLockInstance called with instance-level locks set"
    full_name = self.cfg.ExpandInstanceName(self.op.instance_name)
    if full_name is None:
      raise errors.OpPrereqError("Instance '%s' not known" %
                                  self.op.instance_name)
    self.op.instance_name = full_name
    self.needed_locks[locking.LEVEL_INSTANCE] = full_name

  def _LockInstancesNodes(self, primary_only=False):
    """Helper function to declare instances' nodes for locking.

    Should be called from DeclareLocks after one or more instances have
    been locked, in a way similar to:

    if level == locking.LEVEL_NODE:
      self._LockInstancesNodes()

    It fills self.needed_locks[locking.LEVEL_NODE] with the primary
    (and, unless primary_only is set, secondary) nodes of all the
    instances already locked and present in
    self.needed_locks[locking.LEVEL_INSTANCE], either replacing or
    appending to the previous contents depending on the mode stored in
    self.recalculate_locks[locking.LEVEL_NODE], which for safety must
    be set.

    @type primary_only: boolean
    @param primary_only: only lock primary nodes of locked instances

    """
    assert locking.LEVEL_NODE in self.recalculate_locks, \
      "_LockInstancesNodes helper function called with no nodes to recalculate"

    # TODO: check if we're really been called with the instance locks held

    node_names = []
    for inst_name in self.acquired_locks[locking.LEVEL_INSTANCE]:
      inst = self.context.cfg.GetInstanceInfo(inst_name)
      node_names.append(inst.primary_node)
      if not primary_only:
        node_names.extend(inst.secondary_nodes)

    mode = self.recalculate_locks[locking.LEVEL_NODE]
    if mode == constants.LOCKS_REPLACE:
      self.needed_locks[locking.LEVEL_NODE] = node_names
    elif mode == constants.LOCKS_APPEND:
      self.needed_locks[locking.LEVEL_NODE].extend(node_names)

    del self.recalculate_locks[locking.LEVEL_NODE]
313 a8083063 Iustin Pop
314 a8083063 Iustin Pop
class NoHooksLU(LogicalUnit):
  """Base class for Logical Units that do not run any hooks.

  Deriving from this class instead of LogicalUnit disables hook
  execution for the subclass, reducing duplicated boilerplate.

  """
  HPATH = None
  HTYPE = None
323 a8083063 Iustin Pop
324 a8083063 Iustin Pop
325 dcb93971 Michael Hanselmann
def _GetWantedNodes(lu, nodes):
  """Returns list of checked and expanded node names.

  Args:
    lu: the LogicalUnit on whose behalf the names are expanded
    nodes: non-empty list of node names (strings) to expand

  """
  if not isinstance(nodes, list):
    raise errors.OpPrereqError("Invalid argument type 'nodes'")

  if not nodes:
    raise errors.ProgrammerError("_GetWantedNodes should only be called with a"
      " non-empty list of nodes whose name is to be expanded.")

  expanded = []
  for short_name in nodes:
    full_name = lu.cfg.ExpandNodeName(short_name)
    if full_name is None:
      raise errors.OpPrereqError("No such node name '%s'" % short_name)
    expanded.append(full_name)

  return utils.NiceSort(expanded)
347 3312b702 Iustin Pop
348 3312b702 Iustin Pop
349 3312b702 Iustin Pop
def _GetWantedInstances(lu, instances):
  """Returns list of checked and expanded instance names.

  Args:
    instances: List of instances (strings) or None for all

  """
  if not isinstance(instances, list):
    raise errors.OpPrereqError("Invalid argument type 'instances'")

  # an empty (or None) list means "all instances"
  if not instances:
    return utils.NiceSort(lu.cfg.GetInstanceList())

  expanded = []
  for short_name in instances:
    full_name = lu.cfg.ExpandInstanceName(short_name)
    if full_name is None:
      raise errors.OpPrereqError("No such instance name '%s'" % short_name)
    expanded.append(full_name)
  return utils.NiceSort(expanded)
371 dcb93971 Michael Hanselmann
372 dcb93971 Michael Hanselmann
373 dcb93971 Michael Hanselmann
def _CheckOutputFields(static, dynamic, selected):
374 83120a01 Michael Hanselmann
  """Checks whether all selected fields are valid.
375 83120a01 Michael Hanselmann

376 83120a01 Michael Hanselmann
  Args:
377 83120a01 Michael Hanselmann
    static: Static fields
378 83120a01 Michael Hanselmann
    dynamic: Dynamic fields
379 83120a01 Michael Hanselmann

380 83120a01 Michael Hanselmann
  """
381 83120a01 Michael Hanselmann
  static_fields = frozenset(static)
382 83120a01 Michael Hanselmann
  dynamic_fields = frozenset(dynamic)
383 dcb93971 Michael Hanselmann
384 83120a01 Michael Hanselmann
  all_fields = static_fields | dynamic_fields
385 dcb93971 Michael Hanselmann
386 83120a01 Michael Hanselmann
  if not all_fields.issuperset(selected):
387 3ecf6786 Iustin Pop
    raise errors.OpPrereqError("Unknown output fields selected: %s"
388 3ecf6786 Iustin Pop
                               % ",".join(frozenset(selected).
389 3ecf6786 Iustin Pop
                                          difference(all_fields)))
390 dcb93971 Michael Hanselmann
391 dcb93971 Michael Hanselmann
392 ecb215b5 Michael Hanselmann
def _BuildInstanceHookEnv(name, primary_node, secondary_nodes, os_type, status,
393 396e1b78 Michael Hanselmann
                          memory, vcpus, nics):
394 ecb215b5 Michael Hanselmann
  """Builds instance related env variables for hooks from single variables.
395 ecb215b5 Michael Hanselmann

396 ecb215b5 Michael Hanselmann
  Args:
397 ecb215b5 Michael Hanselmann
    secondary_nodes: List of secondary nodes as strings
398 396e1b78 Michael Hanselmann
  """
399 396e1b78 Michael Hanselmann
  env = {
400 0e137c28 Iustin Pop
    "OP_TARGET": name,
401 396e1b78 Michael Hanselmann
    "INSTANCE_NAME": name,
402 396e1b78 Michael Hanselmann
    "INSTANCE_PRIMARY": primary_node,
403 396e1b78 Michael Hanselmann
    "INSTANCE_SECONDARIES": " ".join(secondary_nodes),
404 ecb215b5 Michael Hanselmann
    "INSTANCE_OS_TYPE": os_type,
405 396e1b78 Michael Hanselmann
    "INSTANCE_STATUS": status,
406 396e1b78 Michael Hanselmann
    "INSTANCE_MEMORY": memory,
407 396e1b78 Michael Hanselmann
    "INSTANCE_VCPUS": vcpus,
408 396e1b78 Michael Hanselmann
  }
409 396e1b78 Michael Hanselmann
410 396e1b78 Michael Hanselmann
  if nics:
411 396e1b78 Michael Hanselmann
    nic_count = len(nics)
412 53e4e875 Guido Trotter
    for idx, (ip, bridge, mac) in enumerate(nics):
413 396e1b78 Michael Hanselmann
      if ip is None:
414 396e1b78 Michael Hanselmann
        ip = ""
415 396e1b78 Michael Hanselmann
      env["INSTANCE_NIC%d_IP" % idx] = ip
416 396e1b78 Michael Hanselmann
      env["INSTANCE_NIC%d_BRIDGE" % idx] = bridge
417 53e4e875 Guido Trotter
      env["INSTANCE_NIC%d_HWADDR" % idx] = mac
418 396e1b78 Michael Hanselmann
  else:
419 396e1b78 Michael Hanselmann
    nic_count = 0
420 396e1b78 Michael Hanselmann
421 396e1b78 Michael Hanselmann
  env["INSTANCE_NIC_COUNT"] = nic_count
422 396e1b78 Michael Hanselmann
423 396e1b78 Michael Hanselmann
  return env
424 396e1b78 Michael Hanselmann
425 396e1b78 Michael Hanselmann
426 338e51e8 Iustin Pop
def _BuildInstanceHookEnvByObject(lu, instance, override=None):
  """Builds instance related env variables for hooks from an object.

  Args:
    lu: the LogicalUnit on whose behalf the environment is built (used
        to reach the cluster config for the filled-in BE parameters)
    instance: objects.Instance object of instance
    override: dict of values to override
  """
  bep = lu.cfg.GetClusterInfo().FillBE(instance)
  args = {
    'name': instance.name,
    'primary_node': instance.primary_node,
    'secondary_nodes': instance.secondary_nodes,
    'os_type': instance.os,
    # BUGFIX: this used to pass instance.os, which made INSTANCE_STATUS
    # a duplicate of INSTANCE_OS_TYPE in the hooks environment instead
    # of the instance's run status.
    # NOTE(review): assumes objects.Instance exposes a 'status'
    # attribute ("up"/"down") at this revision - confirm against
    # lib/objects.py.
    'status': instance.status,
    'memory': bep[constants.BE_MEMORY],
    'vcpus': bep[constants.BE_VCPUS],
    'nics': [(nic.ip, nic.bridge, nic.mac) for nic in instance.nics],
  }
  if override:
    args.update(override)
  return _BuildInstanceHookEnv(**args)
447 396e1b78 Michael Hanselmann
448 396e1b78 Michael Hanselmann
449 b9bddb6b Iustin Pop
def _CheckInstanceBridgesExist(lu, instance):
450 bf6929a2 Alexander Schreiber
  """Check that the brigdes needed by an instance exist.
451 bf6929a2 Alexander Schreiber

452 bf6929a2 Alexander Schreiber
  """
453 bf6929a2 Alexander Schreiber
  # check bridges existance
454 bf6929a2 Alexander Schreiber
  brlist = [nic.bridge for nic in instance.nics]
455 72737a7f Iustin Pop
  if not lu.rpc.call_bridges_exist(instance.primary_node, brlist):
456 bf6929a2 Alexander Schreiber
    raise errors.OpPrereqError("one or more target bridges %s does not"
457 bf6929a2 Alexander Schreiber
                               " exist on destination node '%s'" %
458 bf6929a2 Alexander Schreiber
                               (brlist, instance.primary_node))
459 bf6929a2 Alexander Schreiber
460 bf6929a2 Alexander Schreiber
461 a8083063 Iustin Pop
class LUDestroyCluster(NoHooksLU):
  """Logical unit for destroying the cluster.

  """
  _OP_REQP = []

  def CheckPrereq(self):
    """Check prerequisites.

    Verifies that the cluster is empty: only the master node may remain
    and no instances may be defined.

    Any errors are signalled by raising errors.OpPrereqError.

    """
    master = self.cfg.GetMasterNode()

    nodes = self.cfg.GetNodeList()
    if len(nodes) != 1 or nodes[0] != master:
      raise errors.OpPrereqError("There are still %d node(s) in"
                                 " this cluster." % (len(nodes) - 1))
    instances = self.cfg.GetInstanceList()
    if instances:
      raise errors.OpPrereqError("There are still %d instance(s) in"
                                 " this cluster." % len(instances))

  def Exec(self, feedback_fn):
    """Destroys the cluster.

    Stops the master role on the master node and backs up the cluster
    ssh keys; returns the master's name so the caller can finish the
    teardown.

    """
    master = self.cfg.GetMasterNode()
    if not self.rpc.call_node_stop_master(master, False):
      raise errors.OpExecError("Could not disable the master role")
    # preserve copies of the cluster's ssh key pair before it goes away
    priv_key, pub_key, _ = ssh.GetUserFiles(constants.GANETI_RUNAS)
    for key_file in (priv_key, pub_key):
      utils.CreateBackup(key_file)
    return master
497 a8083063 Iustin Pop
498 a8083063 Iustin Pop
499 d8fff41c Guido Trotter
class LUVerifyCluster(LogicalUnit):
  """Verifies the cluster status.

  """
  HPATH = "cluster-verify"
  HTYPE = constants.HTYPE_CLUSTER
  # the opcode must carry the list of optional checks to skip
  _OP_REQP = ["skip_checks"]
  # locking is declared in ExpandNames, so the big ganeti lock is not needed
  REQ_BGL = False

  def ExpandNames(self):
    # verification needs a consistent view of the whole cluster, so we
    # acquire locks on all nodes and all instances; share_locks is set
    # to 1 for every level (presumably shared/read locks — the LU only
    # inspects state)
    self.needed_locks = {
      locking.LEVEL_NODE: locking.ALL_SET,
      locking.LEVEL_INSTANCE: locking.ALL_SET,
    }
    self.share_locks = dict(((i, 1) for i in locking.LEVELS))
514 a8083063 Iustin Pop
515 a8083063 Iustin Pop
  def _VerifyNode(self, node, file_list, local_cksum, vglist, node_result,
                  remote_version, feedback_fn):
    """Run multiple tests against a node.

    Test list:
      - compares ganeti version
      - checks vg existance and size > 20G
      - checks config file checksum
      - checks ssh to other nodes

    Args:
      node: name of the node to check
      file_list: required list of files
      local_cksum: dictionary of local files and their checksums
      vglist: volume group data as returned by the node
      node_result: the results of the node's call_node_verify rpc
      remote_version: the protocol version reported by the node
      feedback_fn: function used to report each detected problem

    Returns:
      True if at least one check failed, False otherwise.

    """
    # compares ganeti version
    local_version = constants.PROTOCOL_VERSION
    if not remote_version:
      feedback_fn("  - ERROR: connection to %s failed" % (node))
      return True

    if local_version != remote_version:
      feedback_fn("  - ERROR: sw version mismatch: master %s, node(%s) %s" %
                      (local_version, node, remote_version))
      return True

    # checks vg existance and size > 20G

    bad = False
    if not vglist:
      feedback_fn("  - ERROR: unable to check volume groups on node %s." %
                      (node,))
      bad = True
    else:
      vgstatus = utils.CheckVolumeGroupSize(vglist, self.cfg.GetVGName(),
                                            constants.MIN_VG_SIZE)
      if vgstatus:
        feedback_fn("  - ERROR: %s on node %s" % (vgstatus, node))
        bad = True

    if not node_result:
      feedback_fn("  - ERROR: unable to verify node %s." % (node,))
      return True

    # checks config file checksum
    # checks ssh to any

    if 'filelist' not in node_result:
      bad = True
      feedback_fn("  - ERROR: node hasn't returned file checksum data")
    else:
      remote_cksum = node_result['filelist']
      for file_name in file_list:
        if file_name not in remote_cksum:
          bad = True
          feedback_fn("  - ERROR: file '%s' missing" % file_name)
        elif remote_cksum[file_name] != local_cksum[file_name]:
          bad = True
          feedback_fn("  - ERROR: file '%s' has wrong checksum" % file_name)

    if 'nodelist' not in node_result:
      bad = True
      feedback_fn("  - ERROR: node hasn't returned node ssh connectivity data")
    else:
      if node_result['nodelist']:
        bad = True
        # use a distinct loop variable so the 'node' parameter (used by
        # the checks above and potentially by checks added later) is not
        # silently shadowed
        for anode in node_result['nodelist']:
          feedback_fn("  - ERROR: ssh communication with node '%s': %s" %
                          (anode, node_result['nodelist'][anode]))
    if 'node-net-test' not in node_result:
      bad = True
      feedback_fn("  - ERROR: node hasn't returned node tcp connectivity data")
    else:
      if node_result['node-net-test']:
        bad = True
        nlist = utils.NiceSort(node_result['node-net-test'].keys())
        for anode in nlist:
          feedback_fn("  - ERROR: tcp communication with node '%s': %s" %
                          (anode, node_result['node-net-test'][anode]))

    hyp_result = node_result.get('hypervisor', None)
    if isinstance(hyp_result, dict):
      for hv_name, hv_result in hyp_result.iteritems():
        # a None entry means the hypervisor verified cleanly
        if hv_result is not None:
          feedback_fn("  - ERROR: hypervisor %s verify failure: '%s'" %
                      (hv_name, hv_result))
    return bad
603 a8083063 Iustin Pop
604 c5705f58 Guido Trotter
  def _VerifyInstance(self, instance, instanceconfig, node_vol_is,
605 c5705f58 Guido Trotter
                      node_instance, feedback_fn):
606 a8083063 Iustin Pop
    """Verify an instance.
607 a8083063 Iustin Pop

608 a8083063 Iustin Pop
    This function checks to see if the required block devices are
609 a8083063 Iustin Pop
    available on the instance's node.
610 a8083063 Iustin Pop

611 a8083063 Iustin Pop
    """
612 a8083063 Iustin Pop
    bad = False
613 a8083063 Iustin Pop
614 a8083063 Iustin Pop
    node_current = instanceconfig.primary_node
615 a8083063 Iustin Pop
616 a8083063 Iustin Pop
    node_vol_should = {}
617 a8083063 Iustin Pop
    instanceconfig.MapLVsByNode(node_vol_should)
618 a8083063 Iustin Pop
619 a8083063 Iustin Pop
    for node in node_vol_should:
620 a8083063 Iustin Pop
      for volume in node_vol_should[node]:
621 a8083063 Iustin Pop
        if node not in node_vol_is or volume not in node_vol_is[node]:
622 a8083063 Iustin Pop
          feedback_fn("  - ERROR: volume %s missing on node %s" %
623 a8083063 Iustin Pop
                          (volume, node))
624 a8083063 Iustin Pop
          bad = True
625 a8083063 Iustin Pop
626 a8083063 Iustin Pop
    if not instanceconfig.status == 'down':
627 a872dae6 Guido Trotter
      if (node_current not in node_instance or
628 a872dae6 Guido Trotter
          not instance in node_instance[node_current]):
629 a8083063 Iustin Pop
        feedback_fn("  - ERROR: instance %s not running on node %s" %
630 a8083063 Iustin Pop
                        (instance, node_current))
631 a8083063 Iustin Pop
        bad = True
632 a8083063 Iustin Pop
633 a8083063 Iustin Pop
    for node in node_instance:
634 a8083063 Iustin Pop
      if (not node == node_current):
635 a8083063 Iustin Pop
        if instance in node_instance[node]:
636 a8083063 Iustin Pop
          feedback_fn("  - ERROR: instance %s should not run on node %s" %
637 a8083063 Iustin Pop
                          (instance, node))
638 a8083063 Iustin Pop
          bad = True
639 a8083063 Iustin Pop
640 6a438c98 Michael Hanselmann
    return bad
641 a8083063 Iustin Pop
642 a8083063 Iustin Pop
  def _VerifyOrphanVolumes(self, node_vol_should, node_vol_is, feedback_fn):
643 a8083063 Iustin Pop
    """Verify if there are any unknown volumes in the cluster.
644 a8083063 Iustin Pop

645 a8083063 Iustin Pop
    The .os, .swap and backup volumes are ignored. All other volumes are
646 a8083063 Iustin Pop
    reported as unknown.
647 a8083063 Iustin Pop

648 a8083063 Iustin Pop
    """
649 a8083063 Iustin Pop
    bad = False
650 a8083063 Iustin Pop
651 a8083063 Iustin Pop
    for node in node_vol_is:
652 a8083063 Iustin Pop
      for volume in node_vol_is[node]:
653 a8083063 Iustin Pop
        if node not in node_vol_should or volume not in node_vol_should[node]:
654 a8083063 Iustin Pop
          feedback_fn("  - ERROR: volume %s on node %s should not exist" %
655 a8083063 Iustin Pop
                      (volume, node))
656 a8083063 Iustin Pop
          bad = True
657 a8083063 Iustin Pop
    return bad
658 a8083063 Iustin Pop
659 a8083063 Iustin Pop
  def _VerifyOrphanInstances(self, instancelist, node_instance, feedback_fn):
660 a8083063 Iustin Pop
    """Verify the list of running instances.
661 a8083063 Iustin Pop

662 a8083063 Iustin Pop
    This checks what instances are running but unknown to the cluster.
663 a8083063 Iustin Pop

664 a8083063 Iustin Pop
    """
665 a8083063 Iustin Pop
    bad = False
666 a8083063 Iustin Pop
    for node in node_instance:
667 a8083063 Iustin Pop
      for runninginstance in node_instance[node]:
668 a8083063 Iustin Pop
        if runninginstance not in instancelist:
669 a8083063 Iustin Pop
          feedback_fn("  - ERROR: instance %s on node %s should not exist" %
670 a8083063 Iustin Pop
                          (runninginstance, node))
671 a8083063 Iustin Pop
          bad = True
672 a8083063 Iustin Pop
    return bad
673 a8083063 Iustin Pop
674 2b3b6ddd Guido Trotter
  def _VerifyNPlusOneMemory(self, node_info, instance_cfg, feedback_fn):
    """Verify N+1 Memory Resilience.

    Check that if one single node dies we can still start all the instances it
    was primary for.

    """
    bad = False

    for node, nodeinfo in node_info.iteritems():
      # Every node acting as a secondary must have enough free memory to
      # host all the instances it would receive should any single other
      # node (their common primary) fail.
      # FIXME: not ready for failover to an arbitrary node
      # FIXME: does not support file-backed instances
      # WARNING: down instances are counted too, since someone might want
      # to start them even in the event of a node failure.
      for prinode, instances in nodeinfo['sinst-by-pnode'].iteritems():
        needed = 0
        for iname in instances:
          be_params = self.cfg.GetClusterInfo().FillBE(instance_cfg[iname])
          if be_params[constants.BE_AUTO_BALANCE]:
            needed += be_params[constants.BE_MEMORY]
        if nodeinfo['mfree'] < needed:
          feedback_fn("  - ERROR: not enough memory on node %s to accomodate"
                      " failovers should node %s fail" % (node, prinode))
          bad = True
    return bad
703 2b3b6ddd Guido Trotter
704 a8083063 Iustin Pop
  def CheckPrereq(self):
    """Check prerequisites.

    Turn the requested list of checks to skip into a frozenset and make
    sure every member is one of the optional verification checks.

    """
    self.skip_set = frozenset(self.op.skip_checks)
    if not self.skip_set.issubset(constants.VERIFY_OPTIONAL_CHECKS):
      raise errors.OpPrereqError("Invalid checks to be skipped specified")
714 a8083063 Iustin Pop
715 d8fff41c Guido Trotter
  def BuildHooksEnv(self):
    """Build hooks env.

    Cluster-Verify hooks just run in the post phase and their failure
    makes the output be logged in the verify output and the verification
    to fail.

    """
    # TODO: populate the environment with useful information for verify hooks
    env = {}
    run_on = self.cfg.GetNodeList()
    return env, [], run_on
726 d8fff41c Guido Trotter
727 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
    """Verify integrity of cluster, performing various test on nodes.

    Returns True when no problems were found, False otherwise.

    """
    bad = False
    feedback_fn("* Verifying global settings")
    for msg in self.cfg.VerifyConfig():
      feedback_fn("  - ERROR: %s" % msg)

    vg_name = self.cfg.GetVGName()
    hypervisors = self.cfg.GetClusterInfo().enabled_hypervisors
    nodelist = utils.NiceSort(self.cfg.GetNodeList())
    nodeinfo = [self.cfg.GetNodeInfo(nname) for nname in nodelist]
    instancelist = utils.NiceSort(self.cfg.GetInstanceList())
    i_non_redundant = [] # Non redundant instances
    i_non_a_balanced = [] # Non auto-balanced instances
    node_volume = {}
    node_instance = {}
    node_info = {}
    instance_cfg = {}

    # FIXME: verify OS list
    # do local checksums
    file_names = []
    file_names.append(constants.SSL_CERT_FILE)
    file_names.append(constants.CLUSTER_CONF_FILE)
    local_checksums = utils.FingerprintFiles(file_names)

    # gather all the per-node data through rpc calls up front, then
    # analyse it node by node below
    feedback_fn("* Gathering data (%d nodes)" % len(nodelist))
    all_volumeinfo = self.rpc.call_volume_list(nodelist, vg_name)
    all_instanceinfo = self.rpc.call_instance_list(nodelist, hypervisors)
    all_vglist = self.rpc.call_vg_list(nodelist)
    node_verify_param = {
      'filelist': file_names,
      'nodelist': nodelist,
      'hypervisor': hypervisors,
      'node-net-test': [(node.name, node.primary_ip, node.secondary_ip)
                        for node in nodeinfo]
      }
    all_nvinfo = self.rpc.call_node_verify(nodelist, node_verify_param,
                                           self.cfg.GetClusterName())
    all_rversion = self.rpc.call_version(nodelist)
    all_ninfo = self.rpc.call_node_info(nodelist, self.cfg.GetVGName(),
                                        self.cfg.GetHypervisorType())

    cluster = self.cfg.GetClusterInfo()
    for node in nodelist:
      feedback_fn("* Verifying node %s" % node)
      result = self._VerifyNode(node, file_names, local_checksums,
                                all_vglist[node], all_nvinfo[node],
                                all_rversion[node], feedback_fn)
      bad = bad or result

      # node_volume
      volumeinfo = all_volumeinfo[node]

      if isinstance(volumeinfo, basestring):
        # a string result means an LVM error on the node; truncate and
        # escape it for display
        feedback_fn("  - ERROR: LVM problem on node %s: %s" %
                    (node, volumeinfo[-400:].encode('string_escape')))
        bad = True
        node_volume[node] = {}
      elif not isinstance(volumeinfo, dict):
        feedback_fn("  - ERROR: connection to %s failed" % (node,))
        bad = True
        continue
      else:
        node_volume[node] = volumeinfo

      # node_instance
      nodeinstance = all_instanceinfo[node]
      if type(nodeinstance) != list:
        feedback_fn("  - ERROR: connection to %s failed" % (node,))
        bad = True
        continue

      node_instance[node] = nodeinstance

      # node_info
      # NOTE(review): this rebinds 'nodeinfo', shadowing the list of node
      # objects built before the loop; the list is not used past this
      # point, but the reuse is fragile
      nodeinfo = all_ninfo[node]
      if not isinstance(nodeinfo, dict):
        feedback_fn("  - ERROR: connection to %s failed" % (node,))
        bad = True
        continue

      try:
        node_info[node] = {
          "mfree": int(nodeinfo['memory_free']),
          "dfree": int(nodeinfo['vg_free']),
          "pinst": [],
          "sinst": [],
          # dictionary holding all instances this node is secondary for,
          # grouped by their primary node. Each key is a cluster node, and each
          # value is a list of instances which have the key as primary and the
          # current node as secondary.  this is handy to calculate N+1 memory
          # availability if you can only failover from a primary to its
          # secondary.
          "sinst-by-pnode": {},
        }
      except ValueError:
        feedback_fn("  - ERROR: invalid value returned from node %s" % (node,))
        bad = True
        continue

    node_vol_should = {}

    for instance in instancelist:
      feedback_fn("* Verifying instance %s" % instance)
      inst_config = self.cfg.GetInstanceInfo(instance)
      result =  self._VerifyInstance(instance, inst_config, node_volume,
                                     node_instance, feedback_fn)
      bad = bad or result

      inst_config.MapLVsByNode(node_vol_should)

      instance_cfg[instance] = inst_config

      pnode = inst_config.primary_node
      if pnode in node_info:
        node_info[pnode]['pinst'].append(instance)
      else:
        feedback_fn("  - ERROR: instance %s, connection to primary node"
                    " %s failed" % (instance, pnode))
        bad = True

      # If the instance is non-redundant we cannot survive losing its primary
      # node, so we are not N+1 compliant. On the other hand we have no disk
      # templates with more than one secondary so that situation is not well
      # supported either.
      # FIXME: does not support file-backed instances
      if len(inst_config.secondary_nodes) == 0:
        i_non_redundant.append(instance)
      elif len(inst_config.secondary_nodes) > 1:
        feedback_fn("  - WARNING: multiple secondaries for instance %s"
                    % instance)

      if not cluster.FillBE(inst_config)[constants.BE_AUTO_BALANCE]:
        i_non_a_balanced.append(instance)

      for snode in inst_config.secondary_nodes:
        if snode in node_info:
          node_info[snode]['sinst'].append(instance)
          if pnode not in node_info[snode]['sinst-by-pnode']:
            node_info[snode]['sinst-by-pnode'][pnode] = []
          node_info[snode]['sinst-by-pnode'][pnode].append(instance)
        else:
          feedback_fn("  - ERROR: instance %s, connection to secondary node"
                      " %s failed" % (instance, snode))

    feedback_fn("* Verifying orphan volumes")
    result = self._VerifyOrphanVolumes(node_vol_should, node_volume,
                                       feedback_fn)
    bad = bad or result

    feedback_fn("* Verifying remaining instances")
    result = self._VerifyOrphanInstances(instancelist, node_instance,
                                         feedback_fn)
    bad = bad or result

    if constants.VERIFY_NPLUSONE_MEM not in self.skip_set:
      feedback_fn("* Verifying N+1 Memory redundancy")
      result = self._VerifyNPlusOneMemory(node_info, instance_cfg, feedback_fn)
      bad = bad or result

    feedback_fn("* Other Notes")
    if i_non_redundant:
      feedback_fn("  - NOTICE: %d non-redundant instance(s) found."
                  % len(i_non_redundant))

    if i_non_a_balanced:
      feedback_fn("  - NOTICE: %d non-auto-balanced instance(s) found."
                  % len(i_non_a_balanced))

    return not bad
900 a8083063 Iustin Pop
901 d8fff41c Guido Trotter
  def HooksCallBack(self, phase, hooks_results, feedback_fn, lu_result):
    """Analyze the post-hooks' result, handle it, and send some
    nicely-formatted feedback back to the user.

    Args:
      phase: the hooks phase that has just been run
      hooks_results: the results of the multi-node hooks rpc call
      feedback_fn: function to send feedback back to the caller
      lu_result: previous Exec result

    """
    # We only really run POST phase hooks, and are only interested in
    # their results
    # NOTE(review): for any other phase this method falls through and
    # implicitly returns None — confirm callers expect that
    if phase == constants.HOOKS_PHASE_POST:
      # Used to change hooks' output to proper indentation
      indent_re = re.compile('^', re.M)
      feedback_fn("* Hooks Results")
      if not hooks_results:
        feedback_fn("  - ERROR: general communication failure")
        lu_result = 1
      else:
        for node_name in hooks_results:
          show_node_header = True
          res = hooks_results[node_name]
          if res is False or not isinstance(res, list):
            feedback_fn("    Communication failure")
            lu_result = 1
            continue
          for script, hkr, output in res:
            if hkr == constants.HKR_FAIL:
              # The node header is only shown once, if there are
              # failing hooks on that node
              if show_node_header:
                feedback_fn("  Node %s:" % node_name)
                show_node_header = False
              feedback_fn("    ERROR: Script %s failed, output:" % script)
              # indent the hook's own output by six spaces per line
              output = indent_re.sub('      ', output)
              feedback_fn("%s" % output)
              # any failing hook makes the whole verification fail
              lu_result = 1

      return lu_result
942 d8fff41c Guido Trotter
943 a8083063 Iustin Pop
944 2c95a8d4 Iustin Pop
class LUVerifyDisks(NoHooksLU):
  """Verifies the cluster disks status.

  """
  _OP_REQP = []
  REQ_BGL = False

  def ExpandNames(self):
    # the check needs a consistent view of the whole cluster, so lock all
    # nodes and instances (share_locks=1 at every level)
    self.needed_locks = {
      locking.LEVEL_NODE: locking.ALL_SET,
      locking.LEVEL_INSTANCE: locking.ALL_SET,
    }
    self.share_locks = dict(((i, 1) for i in locking.LEVELS))

  def CheckPrereq(self):
    """Check prerequisites.

    This has no prerequisites.

    """
    pass

  def Exec(self, feedback_fn):
    """Verify integrity of cluster disks.

    Returns the tuple (nodes, nlvm, instances, missing):
      - nodes: list of nodes whose volume data could not be retrieved
      - nlvm: dict of node -> LVM error string
      - instances: list of instance names having offline logical volumes
      - missing: dict of instance name -> list of (node, volume) pairs
        that should exist but were not found

    """
    # 'result' aliases the four containers below, so filling them in also
    # fills in the returned tuple
    result = res_nodes, res_nlvm, res_instances, res_missing = [], {}, [], {}

    vg_name = self.cfg.GetVGName()
    nodes = utils.NiceSort(self.cfg.GetNodeList())
    instances = [self.cfg.GetInstanceInfo(name)
                 for name in self.cfg.GetInstanceList()]

    nv_dict = {}
    for inst in instances:
      inst_lvs = {}
      # only running, network-mirrored instances are checked
      if (inst.status != "up" or
          inst.disk_template not in constants.DTS_NET_MIRROR):
        continue
      inst.MapLVsByNode(inst_lvs)
      # transform { iname: {node: [vol,],},} to {(node, vol): iname}
      for node, vol_list in inst_lvs.iteritems():
        for vol in vol_list:
          nv_dict[(node, vol)] = inst

    if not nv_dict:
      return result

    node_lvs = self.rpc.call_volume_list(nodes, vg_name)

    for node in nodes:
      # node_volume
      lvs = node_lvs[node]

      if isinstance(lvs, basestring):
        # a string result means an LVM error on the node; record it and
        # skip this node — without the 'continue', lvs.iteritems() below
        # would raise AttributeError on the string
        logging.warning("Error enumerating LVs on node %s: %s", node, lvs)
        res_nlvm[node] = lvs
        continue
      elif not isinstance(lvs, dict):
        logging.warning("Connection to node %s failed or invalid data"
                        " returned", node)
        res_nodes.append(node)
        continue

      for lv_name, (_, lv_inactive, lv_online) in lvs.iteritems():
        # pop the entry so that whatever remains in nv_dict afterwards is
        # known to be missing
        inst = nv_dict.pop((node, lv_name), None)
        if (not lv_online and inst is not None
            and inst.name not in res_instances):
          res_instances.append(inst.name)

    # any leftover items in nv_dict are missing LVs, let's arrange the
    # data better
    for key, inst in nv_dict.iteritems():
      if inst.name not in res_missing:
        res_missing[inst.name] = []
      res_missing[inst.name].append(key)

    return result
1022 2c95a8d4 Iustin Pop
1023 2c95a8d4 Iustin Pop
1024 07bd8a51 Iustin Pop
class LURenameCluster(LogicalUnit):
  """Rename the cluster.

  """
  HPATH = "cluster-rename"
  HTYPE = constants.HTYPE_CLUSTER
  _OP_REQP = ["name"]

  def BuildHooksEnv(self):
    """Build hooks env.

    The hooks run only on the master node, both pre and post.

    """
    master_node = self.cfg.GetMasterNode()
    env = {
      "OP_TARGET": self.cfg.GetClusterName(),
      "NEW_NAME": self.op.name,
      }
    return env, [master_node], [master_node]

  def CheckPrereq(self):
    """Verify that the passed name is a valid one.

    """
    host_info = utils.HostInfo(self.op.name)

    new_name = host_info.name
    self.ip = new_ip = host_info.ip
    old_name = self.cfg.GetClusterName()
    old_ip = self.cfg.GetMasterIP()
    if new_name == old_name and new_ip == old_ip:
      raise errors.OpPrereqError("Neither the name nor the IP address of the"
                                 " cluster has changed")
    if new_ip != old_ip:
      # the new master IP must not be in use by any other host
      if utils.TcpPing(new_ip, constants.DEFAULT_NODED_PORT):
        raise errors.OpPrereqError("The given cluster IP address (%s) is"
                                   " reachable on the network. Aborting." %
                                   new_ip)

    self.op.name = new_name

  def Exec(self, feedback_fn):
    """Rename the cluster.

    """
    new_name = self.op.name
    new_ip = self.ip

    # take the master role down while the name/IP are being switched
    master = self.cfg.GetMasterNode()
    if not self.rpc.call_node_stop_master(master, False):
      raise errors.OpExecError("Could not disable the master role")

    try:
      # modify the sstore
      # TODO: sstore
      # NOTE(review): 'ss' is not defined anywhere visible in this module,
      # so this branch raises NameError as-is; the TODO above marks the
      # pending sstore migration — confirm before relying on this path
      ss.SetKey(ss.SS_MASTER_IP, new_ip)
      ss.SetKey(ss.SS_CLUSTER_NAME, new_name)

      # push the updated ssconf files to every node except the master
      master_info = self.cfg.GetNodeInfo(master)
      other_nodes = self.cfg.GetNodeList()
      if master_info.name in other_nodes:
        other_nodes.remove(master_info.name)

      logging.debug("Copying updated ssconf data to all nodes")
      for key in [ss.SS_CLUSTER_NAME, ss.SS_MASTER_IP]:
        target_file = ss.KeyToFilename(key)
        upload_result = self.rpc.call_upload_file(other_nodes, target_file)
        for dest_node in other_nodes:
          if not upload_result[dest_node]:
            self.LogWarning("Copy of file %s to node %s failed",
                            target_file, dest_node)
    finally:
      # whatever happened above, try to bring the master role back up
      if not self.rpc.call_node_start_master(master, False):
        self.LogWarning("Could not re-enable the master role on"
                        " the master, please restart manually.")
1100 07bd8a51 Iustin Pop
1101 07bd8a51 Iustin Pop
1102 8084f9f6 Manuel Franceschini
def _RecursiveCheckIfLVMBased(disk):
  """Check if the given disk or any of its children is lvm-based.

  Args:
    disk: ganeti.objects.Disk object

  Returns:
    boolean indicating whether a LD_LV dev_type was found or not

  """
  # depth-first over the child devices: a single LD_LV anywhere in the
  # tree makes the whole disk count as lvm-based
  for child in (disk.children or []):
    if _RecursiveCheckIfLVMBased(child):
      return True
  return disk.dev_type == constants.LD_LV
1117 8084f9f6 Manuel Franceschini
1118 8084f9f6 Manuel Franceschini
1119 8084f9f6 Manuel Franceschini
class LUSetClusterParams(LogicalUnit):
  """Change the parameters of the cluster.

  """
  HPATH = "cluster-modify"
  HTYPE = constants.HTYPE_CLUSTER
  _OP_REQP = []
  REQ_BGL = False

  def ExpandNames(self):
    # FIXME: in the future maybe other cluster params won't require checking on
    # all nodes to be modified.
    self.needed_locks = {
      locking.LEVEL_NODE: locking.ALL_SET,
    }
    self.share_locks[locking.LEVEL_NODE] = 1

  def BuildHooksEnv(self):
    """Build hooks env.

    """
    master_node = self.cfg.GetMasterNode()
    env = {
      "OP_TARGET": self.cfg.GetClusterName(),
      "NEW_VG_NAME": self.op.vg_name,
      }
    return env, [master_node], [master_node]

  def CheckPrereq(self):
    """Check prerequisites.

    This checks whether the given params don't conflict and
    if the given volume group is valid.

    """
    # FIXME: This only works because there is only one parameter that can be
    # changed or removed.
    if self.op.vg_name is not None and not self.op.vg_name:
      # disabling LVM is only allowed while no instance uses lvm-backed disks
      for instance in self.cfg.GetAllInstancesInfo().values():
        for disk in instance.disks:
          if _RecursiveCheckIfLVMBased(disk):
            raise errors.OpPrereqError("Cannot disable lvm storage while"
                                       " lvm-based instances exist")

    node_list = self.acquired_locks[locking.LEVEL_NODE]

    # if vg_name not None, checks given volume group on all nodes
    if self.op.vg_name:
      vg_data = self.rpc.call_vg_list(node_list)
      for node_name in node_list:
        vg_error = utils.CheckVolumeGroupSize(vg_data[node_name],
                                              self.op.vg_name,
                                              constants.MIN_VG_SIZE)
        if vg_error:
          raise errors.OpPrereqError("Error on node '%s': %s" %
                                     (node_name, vg_error))

    self.cluster = cluster = self.cfg.GetClusterInfo()
    # beparams changes do not need validation (we can't validate?),
    # but we still process here
    if self.op.beparams:
      self.new_beparams = cluster.FillDict(
        cluster.beparams[constants.BEGR_DEFAULT], self.op.beparams)

    # build the merged hypervisor parameter dictionary
    self.new_hvparams = cluster.FillDict(cluster.hvparams, {})
    if self.op.hvparams:
      if not isinstance(self.op.hvparams, dict):
        raise errors.OpPrereqError("Invalid 'hvparams' parameter on input")
      for hv_name, hv_dict in self.op.hvparams.items():
        if hv_name in self.new_hvparams:
          self.new_hvparams[hv_name].update(hv_dict)
        else:
          self.new_hvparams[hv_name] = hv_dict

    if self.op.enabled_hypervisors is not None:
      self.hv_list = self.op.enabled_hypervisors
    else:
      self.hv_list = cluster.enabled_hypervisors

    if self.op.hvparams or self.op.enabled_hypervisors is not None:
      # either the enabled list has changed, or the parameters have, validate
      for hv_name, hv_params in self.new_hvparams.items():
        params_changed = self.op.hvparams and hv_name in self.op.hvparams
        newly_enabled = (self.op.enabled_hypervisors and
                         hv_name in self.op.enabled_hypervisors)
        if params_changed or newly_enabled:
          # either this is a new hypervisor, or its parameters have changed
          hv_class = hypervisor.GetHypervisor(hv_name)
          hv_class.CheckParameterSyntax(hv_params)
          _CheckHVParams(self, node_list, hv_name, hv_params)

  def Exec(self, feedback_fn):
    """Change the parameters of the cluster.

    """
    if self.op.vg_name is not None:
      if self.op.vg_name == self.cfg.GetVGName():
        feedback_fn("Cluster LVM configuration already in desired"
                    " state, not changing")
      else:
        self.cfg.SetVGName(self.op.vg_name)
    if self.op.hvparams:
      self.cluster.hvparams = self.new_hvparams
    if self.op.enabled_hypervisors is not None:
      self.cluster.enabled_hypervisors = self.op.enabled_hypervisors
    if self.op.beparams:
      self.cluster.beparams[constants.BEGR_DEFAULT] = self.new_beparams
    self.cfg.Update(self.cluster)
1227 8084f9f6 Manuel Franceschini
1228 8084f9f6 Manuel Franceschini
1229 b9bddb6b Iustin Pop
def _WaitForSync(lu, instance, oneshot=False, unlock=False):
1230 a8083063 Iustin Pop
  """Sleep and poll for an instance's disk to sync.
1231 a8083063 Iustin Pop

1232 a8083063 Iustin Pop
  """
1233 a8083063 Iustin Pop
  if not instance.disks:
1234 a8083063 Iustin Pop
    return True
1235 a8083063 Iustin Pop
1236 a8083063 Iustin Pop
  if not oneshot:
1237 b9bddb6b Iustin Pop
    lu.proc.LogInfo("Waiting for instance %s to sync disks." % instance.name)
1238 a8083063 Iustin Pop
1239 a8083063 Iustin Pop
  node = instance.primary_node
1240 a8083063 Iustin Pop
1241 a8083063 Iustin Pop
  for dev in instance.disks:
1242 b9bddb6b Iustin Pop
    lu.cfg.SetDiskID(dev, node)
1243 a8083063 Iustin Pop
1244 a8083063 Iustin Pop
  retries = 0
1245 a8083063 Iustin Pop
  while True:
1246 a8083063 Iustin Pop
    max_time = 0
1247 a8083063 Iustin Pop
    done = True
1248 a8083063 Iustin Pop
    cumul_degraded = False
1249 72737a7f Iustin Pop
    rstats = lu.rpc.call_blockdev_getmirrorstatus(node, instance.disks)
1250 a8083063 Iustin Pop
    if not rstats:
1251 86d9d3bb Iustin Pop
      lu.LogWarning("Can't get any data from node %s", node)
1252 a8083063 Iustin Pop
      retries += 1
1253 a8083063 Iustin Pop
      if retries >= 10:
1254 3ecf6786 Iustin Pop
        raise errors.RemoteError("Can't contact node %s for mirror data,"
1255 3ecf6786 Iustin Pop
                                 " aborting." % node)
1256 a8083063 Iustin Pop
      time.sleep(6)
1257 a8083063 Iustin Pop
      continue
1258 a8083063 Iustin Pop
    retries = 0
1259 a8083063 Iustin Pop
    for i in range(len(rstats)):
1260 a8083063 Iustin Pop
      mstat = rstats[i]
1261 a8083063 Iustin Pop
      if mstat is None:
1262 86d9d3bb Iustin Pop
        lu.LogWarning("Can't compute data for node %s/%s",
1263 86d9d3bb Iustin Pop
                           node, instance.disks[i].iv_name)
1264 a8083063 Iustin Pop
        continue
1265 0834c866 Iustin Pop
      # we ignore the ldisk parameter
1266 0834c866 Iustin Pop
      perc_done, est_time, is_degraded, _ = mstat
1267 a8083063 Iustin Pop
      cumul_degraded = cumul_degraded or (is_degraded and perc_done is None)
1268 a8083063 Iustin Pop
      if perc_done is not None:
1269 a8083063 Iustin Pop
        done = False
1270 a8083063 Iustin Pop
        if est_time is not None:
1271 a8083063 Iustin Pop
          rem_time = "%d estimated seconds remaining" % est_time
1272 a8083063 Iustin Pop
          max_time = est_time
1273 a8083063 Iustin Pop
        else:
1274 a8083063 Iustin Pop
          rem_time = "no time estimate"
1275 b9bddb6b Iustin Pop
        lu.proc.LogInfo("- device %s: %5.2f%% done, %s" %
1276 b9bddb6b Iustin Pop
                        (instance.disks[i].iv_name, perc_done, rem_time))
1277 a8083063 Iustin Pop
    if done or oneshot:
1278 a8083063 Iustin Pop
      break
1279 a8083063 Iustin Pop
1280 d4fa5c23 Iustin Pop
    time.sleep(min(60, max_time))
1281 a8083063 Iustin Pop
1282 a8083063 Iustin Pop
  if done:
1283 b9bddb6b Iustin Pop
    lu.proc.LogInfo("Instance %s's disks are in sync." % instance.name)
1284 a8083063 Iustin Pop
  return not cumul_degraded
1285 a8083063 Iustin Pop
1286 a8083063 Iustin Pop
1287 b9bddb6b Iustin Pop
def _CheckDiskConsistency(lu, dev, node, on_primary, ldisk=False):
1288 a8083063 Iustin Pop
  """Check that mirrors are not degraded.
1289 a8083063 Iustin Pop

1290 0834c866 Iustin Pop
  The ldisk parameter, if True, will change the test from the
1291 0834c866 Iustin Pop
  is_degraded attribute (which represents overall non-ok status for
1292 0834c866 Iustin Pop
  the device(s)) to the ldisk (representing the local storage status).
1293 0834c866 Iustin Pop

1294 a8083063 Iustin Pop
  """
1295 b9bddb6b Iustin Pop
  lu.cfg.SetDiskID(dev, node)
1296 0834c866 Iustin Pop
  if ldisk:
1297 0834c866 Iustin Pop
    idx = 6
1298 0834c866 Iustin Pop
  else:
1299 0834c866 Iustin Pop
    idx = 5
1300 a8083063 Iustin Pop
1301 a8083063 Iustin Pop
  result = True
1302 a8083063 Iustin Pop
  if on_primary or dev.AssembleOnSecondary():
1303 72737a7f Iustin Pop
    rstats = lu.rpc.call_blockdev_find(node, dev)
1304 a8083063 Iustin Pop
    if not rstats:
1305 9a4f63d1 Iustin Pop
      logging.warning("Node %s: disk degraded, not found or node down", node)
1306 a8083063 Iustin Pop
      result = False
1307 a8083063 Iustin Pop
    else:
1308 0834c866 Iustin Pop
      result = result and (not rstats[idx])
1309 a8083063 Iustin Pop
  if dev.children:
1310 a8083063 Iustin Pop
    for child in dev.children:
1311 b9bddb6b Iustin Pop
      result = result and _CheckDiskConsistency(lu, child, node, on_primary)
1312 a8083063 Iustin Pop
1313 a8083063 Iustin Pop
  return result
1314 a8083063 Iustin Pop
1315 a8083063 Iustin Pop
1316 a8083063 Iustin Pop
class LUDiagnoseOS(NoHooksLU):
  """Logical unit for OS diagnose/query.

  """
  _OP_REQP = ["output_fields", "names"]
  REQ_BGL = False

  def ExpandNames(self):
    if self.op.names:
      raise errors.OpPrereqError("Selective OS query not supported")

    self.dynamic_fields = frozenset(["name", "valid", "node_status"])
    _CheckOutputFields(static=[],
                       dynamic=self.dynamic_fields,
                       selected=self.op.output_fields)

    # Lock all nodes, in shared mode
    self.needed_locks = {}
    self.share_locks[locking.LEVEL_NODE] = 1
    self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET

  def CheckPrereq(self):
    """Check prerequisites.

    """

  @staticmethod
  def _DiagnoseByOS(node_list, rlist):
    """Remap a per-node result list into a per-os per-node dictionary.

      Args:
        node_list: a list with the names of all nodes
        rlist: a map with node names as keys and OS objects as values

      Returns:
        map: a map with osnames as keys and as value another map, with
             nodes as keys and list of OS objects as values
             e.g. {"debian-etch": {"node1": [<object>,...],
                                   "node2": [<object>,]}
                  }

    """
    all_os = {}
    for node_name, node_result in rlist.iteritems():
      if not node_result:
        continue
      for os_obj in node_result:
        if os_obj.name not in all_os:
          # seed an empty per-node list for every known node, so that
          # nodes missing this OS still show up explicitly
          all_os[os_obj.name] = dict([(nname, []) for nname in node_list])
        all_os[os_obj.name][node_name].append(os_obj)
    return all_os

  def Exec(self, feedback_fn):
    """Compute the list of OSes.

    """
    node_list = self.acquired_locks[locking.LEVEL_NODE]
    node_data = self.rpc.call_os_diagnose(node_list)
    if node_data == False:
      raise errors.OpExecError("Can't gather the list of OSes")
    per_os = self._DiagnoseByOS(node_list, node_data)
    output = []
    for os_name, node_map in per_os.iteritems():
      os_row = []
      for field in self.op.output_fields:
        if field == "name":
          field_val = os_name
        elif field == "valid":
          # valid only if every node returned a non-empty, valid first entry
          field_val = utils.all([osl and osl[0] for osl in node_map.values()])
        elif field == "node_status":
          field_val = {}
          for node_name, nos_list in node_map.iteritems():
            field_val[node_name] = [(v.status, v.path) for v in nos_list]
        else:
          raise errors.ParameterError(field)
        os_row.append(field_val)
      output.append(os_row)

    return output
1400 a8083063 Iustin Pop
1401 a8083063 Iustin Pop
1402 a8083063 Iustin Pop
class LURemoveNode(LogicalUnit):
  """Logical unit for removing a node.

  """
  HPATH = "node-remove"
  HTYPE = constants.HTYPE_NODE
  _OP_REQP = ["node_name"]

  def BuildHooksEnv(self):
    """Build hooks env.

    This doesn't run on the target node in the pre phase as a failed
    node would then be impossible to remove.

    """
    env = {
      "OP_TARGET": self.op.node_name,
      "NODE_NAME": self.op.node_name,
      }
    all_nodes = self.cfg.GetNodeList()
    all_nodes.remove(self.op.node_name)
    return env, all_nodes, all_nodes

  def CheckPrereq(self):
    """Check prerequisites.

    This checks:
     - the node exists in the configuration
     - it does not have primary or secondary instances
     - it's not the master

    Any errors are signalled by raising errors.OpPrereqError.

    """
    node = self.cfg.GetNodeInfo(self.cfg.ExpandNodeName(self.op.node_name))
    if node is None:
      # FIX: this was an old-style "raise Class, args" statement,
      # inconsistent with every other raise in this module and invalid
      # under Python 3; converted to call syntax
      raise errors.OpPrereqError("Node '%s' is unknown." % self.op.node_name)

    instance_list = self.cfg.GetInstanceList()

    masternode = self.cfg.GetMasterNode()
    if node.name == masternode:
      raise errors.OpPrereqError("Node is the master node,"
                                 " you need to failover first.")

    for instance_name in instance_list:
      instance = self.cfg.GetInstanceInfo(instance_name)
      if node.name == instance.primary_node:
        raise errors.OpPrereqError("Instance %s still running on the node,"
                                   " please remove first." % instance_name)
      if node.name in instance.secondary_nodes:
        raise errors.OpPrereqError("Instance %s has node as a secondary,"
                                   " please remove first." % instance_name)
    self.op.node_name = node.name
    self.node = node

  def Exec(self, feedback_fn):
    """Removes the node from the cluster.

    """
    node = self.node
    logging.info("Stopping the node daemon and removing configs from node %s",
                 node.name)

    self.context.RemoveNode(node.name)

    self.rpc.call_node_leave_cluster(node.name)
1469 c8a0948f Michael Hanselmann
1470 a8083063 Iustin Pop
1471 a8083063 Iustin Pop
class LUQueryNodes(NoHooksLU):
  """Logical unit for querying nodes.

  """
  _OP_REQP = ["output_fields", "names"]
  REQ_BGL = False

  def ExpandNames(self):
    # fields that require a live RPC call to the nodes
    self.dynamic_fields = frozenset(["dtotal", "dfree",
                                     "mtotal", "mnode", "mfree",
                                     "bootid", "ctotal"])
    # fields answered purely from the configuration
    self.static_fields = frozenset(["name", "pinst_cnt", "sinst_cnt",
                                    "pinst_list", "sinst_list",
                                    "pip", "sip", "tags", "serial_no"])

    _CheckOutputFields(static=self.static_fields,
                       dynamic=self.dynamic_fields,
                       selected=self.op.output_fields)

    self.needed_locks = {}
    self.share_locks[locking.LEVEL_NODE] = 1

    if self.op.names:
      self.wanted = _GetWantedNodes(self, self.op.names)
    else:
      self.wanted = locking.ALL_SET

    # purely static queries can run lock-less from the config; any dynamic
    # field forces us to lock the involved nodes
    self.do_locking = not self.static_fields.issuperset(self.op.output_fields)
    if self.do_locking:
      self.needed_locks[locking.LEVEL_NODE] = self.wanted

  def CheckPrereq(self):
    """Check prerequisites.

    """
    # The validation of the node list is done in the _GetWantedNodes,
    # if non empty, and if empty, there's no validation to do
    pass

  def Exec(self, feedback_fn):
    """Computes the list of nodes and their attributes.

    """
    all_info = self.cfg.GetAllNodesInfo()
    if self.do_locking:
      node_names = self.acquired_locks[locking.LEVEL_NODE]
    elif self.wanted != locking.ALL_SET:
      node_names = self.wanted
      missing = set(node_names).difference(all_info.keys())
      if missing:
        raise errors.OpExecError(
          "Some nodes were removed before retrieving their data: %s" % missing)
    else:
      node_names = all_info.keys()

    node_names = utils.NiceSort(node_names)
    node_list = [all_info[name] for name in node_names]

    # gather live data only when at least one dynamic field was requested
    if self.dynamic_fields.intersection(self.op.output_fields):
      live_data = {}
      node_data = self.rpc.call_node_info(node_names, self.cfg.GetVGName(),
                                          self.cfg.GetHypervisorType())
      for name in node_names:
        nodeinfo = node_data.get(name, None)
        if not nodeinfo:
          live_data[name] = {}
          continue
        live_data[name] = {
          "mtotal": utils.TryConvert(int, nodeinfo['memory_total']),
          "mnode": utils.TryConvert(int, nodeinfo['memory_dom0']),
          "mfree": utils.TryConvert(int, nodeinfo['memory_free']),
          "dtotal": utils.TryConvert(int, nodeinfo['vg_size']),
          "dfree": utils.TryConvert(int, nodeinfo['vg_free']),
          "ctotal": utils.TryConvert(int, nodeinfo['cpu_total']),
          "bootid": nodeinfo['bootid'],
          }
    else:
      live_data = dict.fromkeys(node_names, {})

    node_to_primary = dict([(name, set()) for name in node_names])
    node_to_secondary = dict([(name, set()) for name in node_names])

    # the instance mappings are only needed for these four fields
    inst_fields = frozenset(("pinst_cnt", "pinst_list",
                             "sinst_cnt", "sinst_list"))
    if inst_fields & frozenset(self.op.output_fields):
      for instance_name in self.cfg.GetInstanceList():
        inst = self.cfg.GetInstanceInfo(instance_name)
        if inst.primary_node in node_to_primary:
          node_to_primary[inst.primary_node].add(inst.name)
        for secnode in inst.secondary_nodes:
          if secnode in node_to_secondary:
            node_to_secondary[secnode].add(inst.name)

    output = []
    for node in node_list:
      row = []
      for field in self.op.output_fields:
        if field == "name":
          val = node.name
        elif field == "pinst_list":
          val = list(node_to_primary[node.name])
        elif field == "sinst_list":
          val = list(node_to_secondary[node.name])
        elif field == "pinst_cnt":
          val = len(node_to_primary[node.name])
        elif field == "sinst_cnt":
          val = len(node_to_secondary[node.name])
        elif field == "pip":
          val = node.primary_ip
        elif field == "sip":
          val = node.secondary_ip
        elif field == "tags":
          val = list(node.GetTags())
        elif field == "serial_no":
          val = node.serial_no
        elif field in self.dynamic_fields:
          val = live_data[node.name].get(field, None)
        else:
          raise errors.ParameterError(field)
        row.append(val)
      output.append(row)

    return output
1609 a8083063 Iustin Pop
1610 a8083063 Iustin Pop
1611 dcb93971 Michael Hanselmann
class LUQueryNodeVolumes(NoHooksLU):
1612 dcb93971 Michael Hanselmann
  """Logical unit for getting volumes on node(s).
1613 dcb93971 Michael Hanselmann

1614 dcb93971 Michael Hanselmann
  """
1615 dcb93971 Michael Hanselmann
  _OP_REQP = ["nodes", "output_fields"]
1616 21a15682 Guido Trotter
  REQ_BGL = False
1617 21a15682 Guido Trotter
1618 21a15682 Guido Trotter
  def ExpandNames(self):
1619 21a15682 Guido Trotter
    _CheckOutputFields(static=["node"],
1620 21a15682 Guido Trotter
                       dynamic=["phys", "vg", "name", "size", "instance"],
1621 21a15682 Guido Trotter
                       selected=self.op.output_fields)
1622 21a15682 Guido Trotter
1623 21a15682 Guido Trotter
    self.needed_locks = {}
1624 21a15682 Guido Trotter
    self.share_locks[locking.LEVEL_NODE] = 1
1625 21a15682 Guido Trotter
    if not self.op.nodes:
1626 e310b019 Guido Trotter
      self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
1627 21a15682 Guido Trotter
    else:
1628 21a15682 Guido Trotter
      self.needed_locks[locking.LEVEL_NODE] = \
1629 21a15682 Guido Trotter
        _GetWantedNodes(self, self.op.nodes)
1630 dcb93971 Michael Hanselmann
1631 dcb93971 Michael Hanselmann
  def CheckPrereq(self):
1632 dcb93971 Michael Hanselmann
    """Check prerequisites.
1633 dcb93971 Michael Hanselmann

1634 dcb93971 Michael Hanselmann
    This checks that the fields required are valid output fields.
1635 dcb93971 Michael Hanselmann

1636 dcb93971 Michael Hanselmann
    """
1637 21a15682 Guido Trotter
    self.nodes = self.acquired_locks[locking.LEVEL_NODE]
1638 dcb93971 Michael Hanselmann
1639 dcb93971 Michael Hanselmann
  def Exec(self, feedback_fn):
1640 dcb93971 Michael Hanselmann
    """Computes the list of nodes and their attributes.
1641 dcb93971 Michael Hanselmann

1642 dcb93971 Michael Hanselmann
    """
1643 a7ba5e53 Iustin Pop
    nodenames = self.nodes
1644 72737a7f Iustin Pop
    volumes = self.rpc.call_node_volumes(nodenames)
1645 dcb93971 Michael Hanselmann
1646 dcb93971 Michael Hanselmann
    ilist = [self.cfg.GetInstanceInfo(iname) for iname
1647 dcb93971 Michael Hanselmann
             in self.cfg.GetInstanceList()]
1648 dcb93971 Michael Hanselmann
1649 dcb93971 Michael Hanselmann
    lv_by_node = dict([(inst, inst.MapLVsByNode()) for inst in ilist])
1650 dcb93971 Michael Hanselmann
1651 dcb93971 Michael Hanselmann
    output = []
1652 dcb93971 Michael Hanselmann
    for node in nodenames:
1653 37d19eb2 Michael Hanselmann
      if node not in volumes or not volumes[node]:
1654 37d19eb2 Michael Hanselmann
        continue
1655 37d19eb2 Michael Hanselmann
1656 dcb93971 Michael Hanselmann
      node_vols = volumes[node][:]
1657 dcb93971 Michael Hanselmann
      node_vols.sort(key=lambda vol: vol['dev'])
1658 dcb93971 Michael Hanselmann
1659 dcb93971 Michael Hanselmann
      for vol in node_vols:
1660 dcb93971 Michael Hanselmann
        node_output = []
1661 dcb93971 Michael Hanselmann
        for field in self.op.output_fields:
1662 dcb93971 Michael Hanselmann
          if field == "node":
1663 dcb93971 Michael Hanselmann
            val = node
1664 dcb93971 Michael Hanselmann
          elif field == "phys":
1665 dcb93971 Michael Hanselmann
            val = vol['dev']
1666 dcb93971 Michael Hanselmann
          elif field == "vg":
1667 dcb93971 Michael Hanselmann
            val = vol['vg']
1668 dcb93971 Michael Hanselmann
          elif field == "name":
1669 dcb93971 Michael Hanselmann
            val = vol['name']
1670 dcb93971 Michael Hanselmann
          elif field == "size":
1671 dcb93971 Michael Hanselmann
            val = int(float(vol['size']))
1672 dcb93971 Michael Hanselmann
          elif field == "instance":
1673 dcb93971 Michael Hanselmann
            for inst in ilist:
1674 dcb93971 Michael Hanselmann
              if node not in lv_by_node[inst]:
1675 dcb93971 Michael Hanselmann
                continue
1676 dcb93971 Michael Hanselmann
              if vol['name'] in lv_by_node[inst][node]:
1677 dcb93971 Michael Hanselmann
                val = inst.name
1678 dcb93971 Michael Hanselmann
                break
1679 dcb93971 Michael Hanselmann
            else:
1680 dcb93971 Michael Hanselmann
              val = '-'
1681 dcb93971 Michael Hanselmann
          else:
1682 3ecf6786 Iustin Pop
            raise errors.ParameterError(field)
1683 dcb93971 Michael Hanselmann
          node_output.append(str(val))
1684 dcb93971 Michael Hanselmann
1685 dcb93971 Michael Hanselmann
        output.append(node_output)
1686 dcb93971 Michael Hanselmann
1687 dcb93971 Michael Hanselmann
    return output
1688 dcb93971 Michael Hanselmann
1689 dcb93971 Michael Hanselmann
1690 a8083063 Iustin Pop
class LUAddNode(LogicalUnit):
1691 a8083063 Iustin Pop
  """Logical unit for adding node to the cluster.
1692 a8083063 Iustin Pop

1693 a8083063 Iustin Pop
  """
1694 a8083063 Iustin Pop
  HPATH = "node-add"
1695 a8083063 Iustin Pop
  HTYPE = constants.HTYPE_NODE
1696 a8083063 Iustin Pop
  _OP_REQP = ["node_name"]
1697 a8083063 Iustin Pop
1698 a8083063 Iustin Pop
  def BuildHooksEnv(self):
1699 a8083063 Iustin Pop
    """Build hooks env.
1700 a8083063 Iustin Pop

1701 a8083063 Iustin Pop
    This will run on all nodes before, and on all nodes + the new node after.
1702 a8083063 Iustin Pop

1703 a8083063 Iustin Pop
    """
1704 a8083063 Iustin Pop
    env = {
1705 0e137c28 Iustin Pop
      "OP_TARGET": self.op.node_name,
1706 a8083063 Iustin Pop
      "NODE_NAME": self.op.node_name,
1707 a8083063 Iustin Pop
      "NODE_PIP": self.op.primary_ip,
1708 a8083063 Iustin Pop
      "NODE_SIP": self.op.secondary_ip,
1709 a8083063 Iustin Pop
      }
1710 a8083063 Iustin Pop
    nodes_0 = self.cfg.GetNodeList()
1711 a8083063 Iustin Pop
    nodes_1 = nodes_0 + [self.op.node_name, ]
1712 a8083063 Iustin Pop
    return env, nodes_0, nodes_1
1713 a8083063 Iustin Pop
1714 a8083063 Iustin Pop
  def CheckPrereq(self):
1715 a8083063 Iustin Pop
    """Check prerequisites.
1716 a8083063 Iustin Pop

1717 a8083063 Iustin Pop
    This checks:
1718 a8083063 Iustin Pop
     - the new node is not already in the config
1719 a8083063 Iustin Pop
     - it is resolvable
1720 a8083063 Iustin Pop
     - its parameters (single/dual homed) matches the cluster
1721 a8083063 Iustin Pop

1722 a8083063 Iustin Pop
    Any errors are signalled by raising errors.OpPrereqError.
1723 a8083063 Iustin Pop

1724 a8083063 Iustin Pop
    """
1725 a8083063 Iustin Pop
    node_name = self.op.node_name
1726 a8083063 Iustin Pop
    cfg = self.cfg
1727 a8083063 Iustin Pop
1728 89e1fc26 Iustin Pop
    dns_data = utils.HostInfo(node_name)
1729 a8083063 Iustin Pop
1730 bcf043c9 Iustin Pop
    node = dns_data.name
1731 bcf043c9 Iustin Pop
    primary_ip = self.op.primary_ip = dns_data.ip
1732 a8083063 Iustin Pop
    secondary_ip = getattr(self.op, "secondary_ip", None)
1733 a8083063 Iustin Pop
    if secondary_ip is None:
1734 a8083063 Iustin Pop
      secondary_ip = primary_ip
1735 a8083063 Iustin Pop
    if not utils.IsValidIP(secondary_ip):
1736 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("Invalid secondary IP given")
1737 a8083063 Iustin Pop
    self.op.secondary_ip = secondary_ip
1738 e7c6e02b Michael Hanselmann
1739 a8083063 Iustin Pop
    node_list = cfg.GetNodeList()
1740 e7c6e02b Michael Hanselmann
    if not self.op.readd and node in node_list:
1741 e7c6e02b Michael Hanselmann
      raise errors.OpPrereqError("Node %s is already in the configuration" %
1742 e7c6e02b Michael Hanselmann
                                 node)
1743 e7c6e02b Michael Hanselmann
    elif self.op.readd and node not in node_list:
1744 e7c6e02b Michael Hanselmann
      raise errors.OpPrereqError("Node %s is not in the configuration" % node)
1745 a8083063 Iustin Pop
1746 a8083063 Iustin Pop
    for existing_node_name in node_list:
1747 a8083063 Iustin Pop
      existing_node = cfg.GetNodeInfo(existing_node_name)
1748 e7c6e02b Michael Hanselmann
1749 e7c6e02b Michael Hanselmann
      if self.op.readd and node == existing_node_name:
1750 e7c6e02b Michael Hanselmann
        if (existing_node.primary_ip != primary_ip or
1751 e7c6e02b Michael Hanselmann
            existing_node.secondary_ip != secondary_ip):
1752 e7c6e02b Michael Hanselmann
          raise errors.OpPrereqError("Readded node doesn't have the same IP"
1753 e7c6e02b Michael Hanselmann
                                     " address configuration as before")
1754 e7c6e02b Michael Hanselmann
        continue
1755 e7c6e02b Michael Hanselmann
1756 a8083063 Iustin Pop
      if (existing_node.primary_ip == primary_ip or
1757 a8083063 Iustin Pop
          existing_node.secondary_ip == primary_ip or
1758 a8083063 Iustin Pop
          existing_node.primary_ip == secondary_ip or
1759 a8083063 Iustin Pop
          existing_node.secondary_ip == secondary_ip):
1760 3ecf6786 Iustin Pop
        raise errors.OpPrereqError("New node ip address(es) conflict with"
1761 3ecf6786 Iustin Pop
                                   " existing node %s" % existing_node.name)
1762 a8083063 Iustin Pop
1763 a8083063 Iustin Pop
    # check that the type of the node (single versus dual homed) is the
1764 a8083063 Iustin Pop
    # same as for the master
1765 d6a02168 Michael Hanselmann
    myself = cfg.GetNodeInfo(self.cfg.GetMasterNode())
1766 a8083063 Iustin Pop
    master_singlehomed = myself.secondary_ip == myself.primary_ip
1767 a8083063 Iustin Pop
    newbie_singlehomed = secondary_ip == primary_ip
1768 a8083063 Iustin Pop
    if master_singlehomed != newbie_singlehomed:
1769 a8083063 Iustin Pop
      if master_singlehomed:
1770 3ecf6786 Iustin Pop
        raise errors.OpPrereqError("The master has no private ip but the"
1771 3ecf6786 Iustin Pop
                                   " new node has one")
1772 a8083063 Iustin Pop
      else:
1773 3ecf6786 Iustin Pop
        raise errors.OpPrereqError("The master has a private ip but the"
1774 3ecf6786 Iustin Pop
                                   " new node doesn't have one")
1775 a8083063 Iustin Pop
1776 a8083063 Iustin Pop
    # checks reachablity
1777 b15d625f Iustin Pop
    if not utils.TcpPing(primary_ip, constants.DEFAULT_NODED_PORT):
1778 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("Node not reachable by ping")
1779 a8083063 Iustin Pop
1780 a8083063 Iustin Pop
    if not newbie_singlehomed:
1781 a8083063 Iustin Pop
      # check reachability from my secondary ip to newbie's secondary ip
1782 b15d625f Iustin Pop
      if not utils.TcpPing(secondary_ip, constants.DEFAULT_NODED_PORT,
1783 b15d625f Iustin Pop
                           source=myself.secondary_ip):
1784 f4bc1f2c Michael Hanselmann
        raise errors.OpPrereqError("Node secondary ip not reachable by TCP"
1785 f4bc1f2c Michael Hanselmann
                                   " based ping to noded port")
1786 a8083063 Iustin Pop
1787 a8083063 Iustin Pop
    self.new_node = objects.Node(name=node,
1788 a8083063 Iustin Pop
                                 primary_ip=primary_ip,
1789 a8083063 Iustin Pop
                                 secondary_ip=secondary_ip)
1790 a8083063 Iustin Pop
1791 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
1792 a8083063 Iustin Pop
    """Adds the new node to the cluster.
1793 a8083063 Iustin Pop

1794 a8083063 Iustin Pop
    """
1795 a8083063 Iustin Pop
    new_node = self.new_node
1796 a8083063 Iustin Pop
    node = new_node.name
1797 a8083063 Iustin Pop
1798 a8083063 Iustin Pop
    # check connectivity
1799 72737a7f Iustin Pop
    result = self.rpc.call_version([node])[node]
1800 a8083063 Iustin Pop
    if result:
1801 a8083063 Iustin Pop
      if constants.PROTOCOL_VERSION == result:
1802 9a4f63d1 Iustin Pop
        logging.info("Communication to node %s fine, sw version %s match",
1803 9a4f63d1 Iustin Pop
                     node, result)
1804 a8083063 Iustin Pop
      else:
1805 3ecf6786 Iustin Pop
        raise errors.OpExecError("Version mismatch master version %s,"
1806 3ecf6786 Iustin Pop
                                 " node version %s" %
1807 3ecf6786 Iustin Pop
                                 (constants.PROTOCOL_VERSION, result))
1808 a8083063 Iustin Pop
    else:
1809 3ecf6786 Iustin Pop
      raise errors.OpExecError("Cannot get version from the new node")
1810 a8083063 Iustin Pop
1811 a8083063 Iustin Pop
    # setup ssh on node
1812 9a4f63d1 Iustin Pop
    logging.info("Copy ssh key to node %s", node)
1813 70d9e3d8 Iustin Pop
    priv_key, pub_key, _ = ssh.GetUserFiles(constants.GANETI_RUNAS)
1814 a8083063 Iustin Pop
    keyarray = []
1815 70d9e3d8 Iustin Pop
    keyfiles = [constants.SSH_HOST_DSA_PRIV, constants.SSH_HOST_DSA_PUB,
1816 70d9e3d8 Iustin Pop
                constants.SSH_HOST_RSA_PRIV, constants.SSH_HOST_RSA_PUB,
1817 70d9e3d8 Iustin Pop
                priv_key, pub_key]
1818 a8083063 Iustin Pop
1819 a8083063 Iustin Pop
    for i in keyfiles:
1820 a8083063 Iustin Pop
      f = open(i, 'r')
1821 a8083063 Iustin Pop
      try:
1822 a8083063 Iustin Pop
        keyarray.append(f.read())
1823 a8083063 Iustin Pop
      finally:
1824 a8083063 Iustin Pop
        f.close()
1825 a8083063 Iustin Pop
1826 72737a7f Iustin Pop
    result = self.rpc.call_node_add(node, keyarray[0], keyarray[1],
1827 72737a7f Iustin Pop
                                    keyarray[2],
1828 72737a7f Iustin Pop
                                    keyarray[3], keyarray[4], keyarray[5])
1829 a8083063 Iustin Pop
1830 a8083063 Iustin Pop
    if not result:
1831 3ecf6786 Iustin Pop
      raise errors.OpExecError("Cannot transfer ssh keys to the new node")
1832 a8083063 Iustin Pop
1833 a8083063 Iustin Pop
    # Add node to our /etc/hosts, and add key to known_hosts
1834 d9c02ca6 Michael Hanselmann
    utils.AddHostToEtcHosts(new_node.name)
1835 c8a0948f Michael Hanselmann
1836 a8083063 Iustin Pop
    if new_node.secondary_ip != new_node.primary_ip:
1837 caad16e2 Iustin Pop
      if not self.rpc.call_node_has_ip_address(new_node.name,
1838 caad16e2 Iustin Pop
                                               new_node.secondary_ip):
1839 f4bc1f2c Michael Hanselmann
        raise errors.OpExecError("Node claims it doesn't have the secondary ip"
1840 f4bc1f2c Michael Hanselmann
                                 " you gave (%s). Please fix and re-run this"
1841 f4bc1f2c Michael Hanselmann
                                 " command." % new_node.secondary_ip)
1842 a8083063 Iustin Pop
1843 d6a02168 Michael Hanselmann
    node_verify_list = [self.cfg.GetMasterNode()]
1844 5c0527ed Guido Trotter
    node_verify_param = {
1845 5c0527ed Guido Trotter
      'nodelist': [node],
1846 5c0527ed Guido Trotter
      # TODO: do a node-net-test as well?
1847 5c0527ed Guido Trotter
    }
1848 5c0527ed Guido Trotter
1849 72737a7f Iustin Pop
    result = self.rpc.call_node_verify(node_verify_list, node_verify_param,
1850 72737a7f Iustin Pop
                                       self.cfg.GetClusterName())
1851 5c0527ed Guido Trotter
    for verifier in node_verify_list:
1852 5c0527ed Guido Trotter
      if not result[verifier]:
1853 5c0527ed Guido Trotter
        raise errors.OpExecError("Cannot communicate with %s's node daemon"
1854 5c0527ed Guido Trotter
                                 " for remote verification" % verifier)
1855 5c0527ed Guido Trotter
      if result[verifier]['nodelist']:
1856 5c0527ed Guido Trotter
        for failed in result[verifier]['nodelist']:
1857 5c0527ed Guido Trotter
          feedback_fn("ssh/hostname verification failed %s -> %s" %
1858 5c0527ed Guido Trotter
                      (verifier, result[verifier]['nodelist'][failed]))
1859 5c0527ed Guido Trotter
        raise errors.OpExecError("ssh/hostname verification failed.")
1860 ff98055b Iustin Pop
1861 a8083063 Iustin Pop
    # Distribute updated /etc/hosts and known_hosts to all nodes,
1862 a8083063 Iustin Pop
    # including the node just added
1863 d6a02168 Michael Hanselmann
    myself = self.cfg.GetNodeInfo(self.cfg.GetMasterNode())
1864 102b115b Michael Hanselmann
    dist_nodes = self.cfg.GetNodeList()
1865 102b115b Michael Hanselmann
    if not self.op.readd:
1866 102b115b Michael Hanselmann
      dist_nodes.append(node)
1867 a8083063 Iustin Pop
    if myself.name in dist_nodes:
1868 a8083063 Iustin Pop
      dist_nodes.remove(myself.name)
1869 a8083063 Iustin Pop
1870 9a4f63d1 Iustin Pop
    logging.debug("Copying hosts and known_hosts to all nodes")
1871 107711b0 Michael Hanselmann
    for fname in (constants.ETC_HOSTS, constants.SSH_KNOWN_HOSTS_FILE):
1872 72737a7f Iustin Pop
      result = self.rpc.call_upload_file(dist_nodes, fname)
1873 a8083063 Iustin Pop
      for to_node in dist_nodes:
1874 a8083063 Iustin Pop
        if not result[to_node]:
1875 9a4f63d1 Iustin Pop
          logging.error("Copy of file %s to node %s failed", fname, to_node)
1876 a8083063 Iustin Pop
1877 d6a02168 Michael Hanselmann
    to_copy = []
1878 00cd937c Iustin Pop
    if constants.HT_XEN_HVM in self.cfg.GetClusterInfo().enabled_hypervisors:
1879 2a6469d5 Alexander Schreiber
      to_copy.append(constants.VNC_PASSWORD_FILE)
1880 a8083063 Iustin Pop
    for fname in to_copy:
1881 72737a7f Iustin Pop
      result = self.rpc.call_upload_file([node], fname)
1882 b5602d15 Guido Trotter
      if not result[node]:
1883 9a4f63d1 Iustin Pop
        logging.error("Could not copy file %s to node %s", fname, node)
1884 a8083063 Iustin Pop
1885 d8470559 Michael Hanselmann
    if self.op.readd:
1886 d8470559 Michael Hanselmann
      self.context.ReaddNode(new_node)
1887 d8470559 Michael Hanselmann
    else:
1888 d8470559 Michael Hanselmann
      self.context.AddNode(new_node)
1889 a8083063 Iustin Pop
1890 a8083063 Iustin Pop
1891 a8083063 Iustin Pop
class LUQueryClusterInfo(NoHooksLU):
1892 a8083063 Iustin Pop
  """Query cluster configuration.
1893 a8083063 Iustin Pop

1894 a8083063 Iustin Pop
  """
1895 a8083063 Iustin Pop
  _OP_REQP = []
1896 59322403 Iustin Pop
  REQ_MASTER = False
1897 642339cf Guido Trotter
  REQ_BGL = False
1898 642339cf Guido Trotter
1899 642339cf Guido Trotter
  def ExpandNames(self):
1900 642339cf Guido Trotter
    self.needed_locks = {}
1901 a8083063 Iustin Pop
1902 a8083063 Iustin Pop
  def CheckPrereq(self):
1903 a8083063 Iustin Pop
    """No prerequsites needed for this LU.
1904 a8083063 Iustin Pop

1905 a8083063 Iustin Pop
    """
1906 a8083063 Iustin Pop
    pass
1907 a8083063 Iustin Pop
1908 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
1909 a8083063 Iustin Pop
    """Return cluster config.
1910 a8083063 Iustin Pop

1911 a8083063 Iustin Pop
    """
1912 469f88e1 Iustin Pop
    cluster = self.cfg.GetClusterInfo()
1913 a8083063 Iustin Pop
    result = {
1914 a8083063 Iustin Pop
      "software_version": constants.RELEASE_VERSION,
1915 a8083063 Iustin Pop
      "protocol_version": constants.PROTOCOL_VERSION,
1916 a8083063 Iustin Pop
      "config_version": constants.CONFIG_VERSION,
1917 a8083063 Iustin Pop
      "os_api_version": constants.OS_API_VERSION,
1918 a8083063 Iustin Pop
      "export_version": constants.EXPORT_VERSION,
1919 a8083063 Iustin Pop
      "architecture": (platform.architecture()[0], platform.machine()),
1920 469f88e1 Iustin Pop
      "name": cluster.cluster_name,
1921 469f88e1 Iustin Pop
      "master": cluster.master_node,
1922 02691904 Alexander Schreiber
      "default_hypervisor": cluster.default_hypervisor,
1923 469f88e1 Iustin Pop
      "enabled_hypervisors": cluster.enabled_hypervisors,
1924 469f88e1 Iustin Pop
      "hvparams": cluster.hvparams,
1925 469f88e1 Iustin Pop
      "beparams": cluster.beparams,
1926 a8083063 Iustin Pop
      }
1927 a8083063 Iustin Pop
1928 a8083063 Iustin Pop
    return result
1929 a8083063 Iustin Pop
1930 a8083063 Iustin Pop
1931 ae5849b5 Michael Hanselmann
class LUQueryConfigValues(NoHooksLU):
1932 ae5849b5 Michael Hanselmann
  """Return configuration values.
1933 a8083063 Iustin Pop

1934 a8083063 Iustin Pop
  """
1935 a8083063 Iustin Pop
  _OP_REQP = []
1936 642339cf Guido Trotter
  REQ_BGL = False
1937 642339cf Guido Trotter
1938 642339cf Guido Trotter
  def ExpandNames(self):
1939 642339cf Guido Trotter
    self.needed_locks = {}
1940 a8083063 Iustin Pop
1941 3ccafd0e Iustin Pop
    static_fields = ["cluster_name", "master_node", "drain_flag"]
1942 ae5849b5 Michael Hanselmann
    _CheckOutputFields(static=static_fields,
1943 ae5849b5 Michael Hanselmann
                       dynamic=[],
1944 ae5849b5 Michael Hanselmann
                       selected=self.op.output_fields)
1945 ae5849b5 Michael Hanselmann
1946 a8083063 Iustin Pop
  def CheckPrereq(self):
1947 a8083063 Iustin Pop
    """No prerequisites.
1948 a8083063 Iustin Pop

1949 a8083063 Iustin Pop
    """
1950 a8083063 Iustin Pop
    pass
1951 a8083063 Iustin Pop
1952 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
1953 a8083063 Iustin Pop
    """Dump a representation of the cluster config to the standard output.
1954 a8083063 Iustin Pop

1955 a8083063 Iustin Pop
    """
1956 ae5849b5 Michael Hanselmann
    values = []
1957 ae5849b5 Michael Hanselmann
    for field in self.op.output_fields:
1958 ae5849b5 Michael Hanselmann
      if field == "cluster_name":
1959 3ccafd0e Iustin Pop
        entry = self.cfg.GetClusterName()
1960 ae5849b5 Michael Hanselmann
      elif field == "master_node":
1961 3ccafd0e Iustin Pop
        entry = self.cfg.GetMasterNode()
1962 3ccafd0e Iustin Pop
      elif field == "drain_flag":
1963 3ccafd0e Iustin Pop
        entry = os.path.exists(constants.JOB_QUEUE_DRAIN_FILE)
1964 ae5849b5 Michael Hanselmann
      else:
1965 ae5849b5 Michael Hanselmann
        raise errors.ParameterError(field)
1966 3ccafd0e Iustin Pop
      values.append(entry)
1967 ae5849b5 Michael Hanselmann
    return values
1968 a8083063 Iustin Pop
1969 a8083063 Iustin Pop
1970 a8083063 Iustin Pop
class LUActivateInstanceDisks(NoHooksLU):
1971 a8083063 Iustin Pop
  """Bring up an instance's disks.
1972 a8083063 Iustin Pop

1973 a8083063 Iustin Pop
  """
1974 a8083063 Iustin Pop
  _OP_REQP = ["instance_name"]
1975 f22a8ba3 Guido Trotter
  REQ_BGL = False
1976 f22a8ba3 Guido Trotter
1977 f22a8ba3 Guido Trotter
  def ExpandNames(self):
1978 f22a8ba3 Guido Trotter
    self._ExpandAndLockInstance()
1979 f22a8ba3 Guido Trotter
    self.needed_locks[locking.LEVEL_NODE] = []
1980 f22a8ba3 Guido Trotter
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
1981 f22a8ba3 Guido Trotter
1982 f22a8ba3 Guido Trotter
  def DeclareLocks(self, level):
1983 f22a8ba3 Guido Trotter
    if level == locking.LEVEL_NODE:
1984 f22a8ba3 Guido Trotter
      self._LockInstancesNodes()
1985 a8083063 Iustin Pop
1986 a8083063 Iustin Pop
  def CheckPrereq(self):
1987 a8083063 Iustin Pop
    """Check prerequisites.
1988 a8083063 Iustin Pop

1989 a8083063 Iustin Pop
    This checks that the instance is in the cluster.
1990 a8083063 Iustin Pop

1991 a8083063 Iustin Pop
    """
1992 f22a8ba3 Guido Trotter
    self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
1993 f22a8ba3 Guido Trotter
    assert self.instance is not None, \
1994 f22a8ba3 Guido Trotter
      "Cannot retrieve locked instance %s" % self.op.instance_name
1995 a8083063 Iustin Pop
1996 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
1997 a8083063 Iustin Pop
    """Activate the disks.
1998 a8083063 Iustin Pop

1999 a8083063 Iustin Pop
    """
2000 b9bddb6b Iustin Pop
    disks_ok, disks_info = _AssembleInstanceDisks(self, self.instance)
2001 a8083063 Iustin Pop
    if not disks_ok:
2002 3ecf6786 Iustin Pop
      raise errors.OpExecError("Cannot activate block devices")
2003 a8083063 Iustin Pop
2004 a8083063 Iustin Pop
    return disks_info
2005 a8083063 Iustin Pop
2006 a8083063 Iustin Pop
2007 b9bddb6b Iustin Pop
def _AssembleInstanceDisks(lu, instance, ignore_secondaries=False):
2008 a8083063 Iustin Pop
  """Prepare the block devices for an instance.
2009 a8083063 Iustin Pop

2010 a8083063 Iustin Pop
  This sets up the block devices on all nodes.
2011 a8083063 Iustin Pop

2012 a8083063 Iustin Pop
  Args:
2013 a8083063 Iustin Pop
    instance: a ganeti.objects.Instance object
2014 a8083063 Iustin Pop
    ignore_secondaries: if true, errors on secondary nodes won't result
2015 a8083063 Iustin Pop
                        in an error return from the function
2016 a8083063 Iustin Pop

2017 a8083063 Iustin Pop
  Returns:
2018 a8083063 Iustin Pop
    false if the operation failed
2019 a8083063 Iustin Pop
    list of (host, instance_visible_name, node_visible_name) if the operation
2020 a8083063 Iustin Pop
         suceeded with the mapping from node devices to instance devices
2021 a8083063 Iustin Pop
  """
2022 a8083063 Iustin Pop
  device_info = []
2023 a8083063 Iustin Pop
  disks_ok = True
2024 fdbd668d Iustin Pop
  iname = instance.name
2025 fdbd668d Iustin Pop
  # With the two passes mechanism we try to reduce the window of
2026 fdbd668d Iustin Pop
  # opportunity for the race condition of switching DRBD to primary
2027 fdbd668d Iustin Pop
  # before handshaking occured, but we do not eliminate it
2028 fdbd668d Iustin Pop
2029 fdbd668d Iustin Pop
  # The proper fix would be to wait (with some limits) until the
2030 fdbd668d Iustin Pop
  # connection has been made and drbd transitions from WFConnection
2031 fdbd668d Iustin Pop
  # into any other network-connected state (Connected, SyncTarget,
2032 fdbd668d Iustin Pop
  # SyncSource, etc.)
2033 fdbd668d Iustin Pop
2034 fdbd668d Iustin Pop
  # 1st pass, assemble on all nodes in secondary mode
2035 a8083063 Iustin Pop
  for inst_disk in instance.disks:
2036 a8083063 Iustin Pop
    for node, node_disk in inst_disk.ComputeNodeTree(instance.primary_node):
2037 b9bddb6b Iustin Pop
      lu.cfg.SetDiskID(node_disk, node)
2038 72737a7f Iustin Pop
      result = lu.rpc.call_blockdev_assemble(node, node_disk, iname, False)
2039 a8083063 Iustin Pop
      if not result:
2040 86d9d3bb Iustin Pop
        lu.proc.LogWarning("Could not prepare block device %s on node %s"
2041 86d9d3bb Iustin Pop
                           " (is_primary=False, pass=1)",
2042 86d9d3bb Iustin Pop
                           inst_disk.iv_name, node)
2043 fdbd668d Iustin Pop
        if not ignore_secondaries:
2044 a8083063 Iustin Pop
          disks_ok = False
2045 fdbd668d Iustin Pop
2046 fdbd668d Iustin Pop
  # FIXME: race condition on drbd migration to primary
2047 fdbd668d Iustin Pop
2048 fdbd668d Iustin Pop
  # 2nd pass, do only the primary node
2049 fdbd668d Iustin Pop
  for inst_disk in instance.disks:
2050 fdbd668d Iustin Pop
    for node, node_disk in inst_disk.ComputeNodeTree(instance.primary_node):
2051 fdbd668d Iustin Pop
      if node != instance.primary_node:
2052 fdbd668d Iustin Pop
        continue
2053 b9bddb6b Iustin Pop
      lu.cfg.SetDiskID(node_disk, node)
2054 72737a7f Iustin Pop
      result = lu.rpc.call_blockdev_assemble(node, node_disk, iname, True)
2055 fdbd668d Iustin Pop
      if not result:
2056 86d9d3bb Iustin Pop
        lu.proc.LogWarning("Could not prepare block device %s on node %s"
2057 86d9d3bb Iustin Pop
                           " (is_primary=True, pass=2)",
2058 86d9d3bb Iustin Pop
                           inst_disk.iv_name, node)
2059 fdbd668d Iustin Pop
        disks_ok = False
2060 fdbd668d Iustin Pop
    device_info.append((instance.primary_node, inst_disk.iv_name, result))
2061 a8083063 Iustin Pop
2062 b352ab5b Iustin Pop
  # leave the disks configured for the primary node
2063 b352ab5b Iustin Pop
  # this is a workaround that would be fixed better by
2064 b352ab5b Iustin Pop
  # improving the logical/physical id handling
2065 b352ab5b Iustin Pop
  for disk in instance.disks:
2066 b9bddb6b Iustin Pop
    lu.cfg.SetDiskID(disk, instance.primary_node)
2067 b352ab5b Iustin Pop
2068 a8083063 Iustin Pop
  return disks_ok, device_info
2069 a8083063 Iustin Pop
2070 a8083063 Iustin Pop
2071 b9bddb6b Iustin Pop
def _StartInstanceDisks(lu, instance, force):
2072 3ecf6786 Iustin Pop
  """Start the disks of an instance.
2073 3ecf6786 Iustin Pop

2074 3ecf6786 Iustin Pop
  """
2075 b9bddb6b Iustin Pop
  disks_ok, dummy = _AssembleInstanceDisks(lu, instance,
2076 fe7b0351 Michael Hanselmann
                                           ignore_secondaries=force)
2077 fe7b0351 Michael Hanselmann
  if not disks_ok:
2078 b9bddb6b Iustin Pop
    _ShutdownInstanceDisks(lu, instance)
2079 fe7b0351 Michael Hanselmann
    if force is not None and not force:
2080 86d9d3bb Iustin Pop
      lu.proc.LogWarning("", hint="If the message above refers to a"
2081 86d9d3bb Iustin Pop
                         " secondary node,"
2082 86d9d3bb Iustin Pop
                         " you can retry the operation using '--force'.")
2083 3ecf6786 Iustin Pop
    raise errors.OpExecError("Disk consistency error")
2084 fe7b0351 Michael Hanselmann
2085 fe7b0351 Michael Hanselmann
2086 a8083063 Iustin Pop
class LUDeactivateInstanceDisks(NoHooksLU):
2087 a8083063 Iustin Pop
  """Shutdown an instance's disks.
2088 a8083063 Iustin Pop

2089 a8083063 Iustin Pop
  """
2090 a8083063 Iustin Pop
  _OP_REQP = ["instance_name"]
2091 f22a8ba3 Guido Trotter
  REQ_BGL = False
2092 f22a8ba3 Guido Trotter
2093 f22a8ba3 Guido Trotter
  def ExpandNames(self):
2094 f22a8ba3 Guido Trotter
    self._ExpandAndLockInstance()
2095 f22a8ba3 Guido Trotter
    self.needed_locks[locking.LEVEL_NODE] = []
2096 f22a8ba3 Guido Trotter
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
2097 f22a8ba3 Guido Trotter
2098 f22a8ba3 Guido Trotter
  def DeclareLocks(self, level):
2099 f22a8ba3 Guido Trotter
    if level == locking.LEVEL_NODE:
2100 f22a8ba3 Guido Trotter
      self._LockInstancesNodes()
2101 a8083063 Iustin Pop
2102 a8083063 Iustin Pop
  def CheckPrereq(self):
2103 a8083063 Iustin Pop
    """Check prerequisites.
2104 a8083063 Iustin Pop

2105 a8083063 Iustin Pop
    This checks that the instance is in the cluster.
2106 a8083063 Iustin Pop

2107 a8083063 Iustin Pop
    """
2108 f22a8ba3 Guido Trotter
    self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
2109 f22a8ba3 Guido Trotter
    assert self.instance is not None, \
2110 f22a8ba3 Guido Trotter
      "Cannot retrieve locked instance %s" % self.op.instance_name
2111 a8083063 Iustin Pop
2112 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
2113 a8083063 Iustin Pop
    """Deactivate the disks
2114 a8083063 Iustin Pop

2115 a8083063 Iustin Pop
    """
2116 a8083063 Iustin Pop
    instance = self.instance
2117 b9bddb6b Iustin Pop
    _SafeShutdownInstanceDisks(self, instance)
2118 a8083063 Iustin Pop
2119 a8083063 Iustin Pop
2120 b9bddb6b Iustin Pop
def _SafeShutdownInstanceDisks(lu, instance):
2121 155d6c75 Guido Trotter
  """Shutdown block devices of an instance.
2122 155d6c75 Guido Trotter

2123 155d6c75 Guido Trotter
  This function checks if an instance is running, before calling
2124 155d6c75 Guido Trotter
  _ShutdownInstanceDisks.
2125 155d6c75 Guido Trotter

2126 155d6c75 Guido Trotter
  """
2127 72737a7f Iustin Pop
  ins_l = lu.rpc.call_instance_list([instance.primary_node],
2128 72737a7f Iustin Pop
                                      [instance.hypervisor])
2129 155d6c75 Guido Trotter
  ins_l = ins_l[instance.primary_node]
2130 155d6c75 Guido Trotter
  if not type(ins_l) is list:
2131 155d6c75 Guido Trotter
    raise errors.OpExecError("Can't contact node '%s'" %
2132 155d6c75 Guido Trotter
                             instance.primary_node)
2133 155d6c75 Guido Trotter
2134 155d6c75 Guido Trotter
  if instance.name in ins_l:
2135 155d6c75 Guido Trotter
    raise errors.OpExecError("Instance is running, can't shutdown"
2136 155d6c75 Guido Trotter
                             " block devices.")
2137 155d6c75 Guido Trotter
2138 b9bddb6b Iustin Pop
  _ShutdownInstanceDisks(lu, instance)
2139 a8083063 Iustin Pop
2140 a8083063 Iustin Pop
2141 b9bddb6b Iustin Pop
def _ShutdownInstanceDisks(lu, instance, ignore_primary=False):
2142 a8083063 Iustin Pop
  """Shutdown block devices of an instance.
2143 a8083063 Iustin Pop

2144 a8083063 Iustin Pop
  This does the shutdown on all nodes of the instance.
2145 a8083063 Iustin Pop

2146 a8083063 Iustin Pop
  If the ignore_primary is false, errors on the primary node are
2147 a8083063 Iustin Pop
  ignored.
2148 a8083063 Iustin Pop

2149 a8083063 Iustin Pop
  """
2150 a8083063 Iustin Pop
  result = True
2151 a8083063 Iustin Pop
  for disk in instance.disks:
2152 a8083063 Iustin Pop
    for node, top_disk in disk.ComputeNodeTree(instance.primary_node):
2153 b9bddb6b Iustin Pop
      lu.cfg.SetDiskID(top_disk, node)
2154 72737a7f Iustin Pop
      if not lu.rpc.call_blockdev_shutdown(node, top_disk):
2155 9a4f63d1 Iustin Pop
        logging.error("Could not shutdown block device %s on node %s",
2156 9a4f63d1 Iustin Pop
                      disk.iv_name, node)
2157 a8083063 Iustin Pop
        if not ignore_primary or node != instance.primary_node:
2158 a8083063 Iustin Pop
          result = False
2159 a8083063 Iustin Pop
  return result
2160 a8083063 Iustin Pop
2161 a8083063 Iustin Pop
2162 b9bddb6b Iustin Pop
def _CheckNodeFreeMemory(lu, node, reason, requested, hypervisor):
2163 d4f16fd9 Iustin Pop
  """Checks if a node has enough free memory.
2164 d4f16fd9 Iustin Pop

2165 d4f16fd9 Iustin Pop
  This function check if a given node has the needed amount of free
2166 d4f16fd9 Iustin Pop
  memory. In case the node has less memory or we cannot get the
2167 d4f16fd9 Iustin Pop
  information from the node, this function raise an OpPrereqError
2168 d4f16fd9 Iustin Pop
  exception.
2169 d4f16fd9 Iustin Pop

2170 b9bddb6b Iustin Pop
  @type lu: C{LogicalUnit}
2171 b9bddb6b Iustin Pop
  @param lu: a logical unit from which we get configuration data
2172 e69d05fd Iustin Pop
  @type node: C{str}
2173 e69d05fd Iustin Pop
  @param node: the node to check
2174 e69d05fd Iustin Pop
  @type reason: C{str}
2175 e69d05fd Iustin Pop
  @param reason: string to use in the error message
2176 e69d05fd Iustin Pop
  @type requested: C{int}
2177 e69d05fd Iustin Pop
  @param requested: the amount of memory in MiB to check for
2178 e69d05fd Iustin Pop
  @type hypervisor: C{str}
2179 e69d05fd Iustin Pop
  @param hypervisor: the hypervisor to ask for memory stats
2180 e69d05fd Iustin Pop
  @raise errors.OpPrereqError: if the node doesn't have enough memory, or
2181 e69d05fd Iustin Pop
      we cannot check the node
2182 d4f16fd9 Iustin Pop

2183 d4f16fd9 Iustin Pop
  """
2184 72737a7f Iustin Pop
  nodeinfo = lu.rpc.call_node_info([node], lu.cfg.GetVGName(), hypervisor)
2185 d4f16fd9 Iustin Pop
  if not nodeinfo or not isinstance(nodeinfo, dict):
2186 d4f16fd9 Iustin Pop
    raise errors.OpPrereqError("Could not contact node %s for resource"
2187 d4f16fd9 Iustin Pop
                             " information" % (node,))
2188 d4f16fd9 Iustin Pop
2189 d4f16fd9 Iustin Pop
  free_mem = nodeinfo[node].get('memory_free')
2190 d4f16fd9 Iustin Pop
  if not isinstance(free_mem, int):
2191 d4f16fd9 Iustin Pop
    raise errors.OpPrereqError("Can't compute free memory on node %s, result"
2192 d4f16fd9 Iustin Pop
                             " was '%s'" % (node, free_mem))
2193 d4f16fd9 Iustin Pop
  if requested > free_mem:
2194 d4f16fd9 Iustin Pop
    raise errors.OpPrereqError("Not enough memory on node %s for %s:"
2195 d4f16fd9 Iustin Pop
                             " needed %s MiB, available %s MiB" %
2196 d4f16fd9 Iustin Pop
                             (node, reason, requested, free_mem))
2197 d4f16fd9 Iustin Pop
2198 d4f16fd9 Iustin Pop
2199 a8083063 Iustin Pop
class LUStartupInstance(LogicalUnit):
2200 a8083063 Iustin Pop
  """Starts an instance.
2201 a8083063 Iustin Pop

2202 a8083063 Iustin Pop
  """
2203 a8083063 Iustin Pop
  HPATH = "instance-start"
2204 a8083063 Iustin Pop
  HTYPE = constants.HTYPE_INSTANCE
2205 a8083063 Iustin Pop
  _OP_REQP = ["instance_name", "force"]
2206 e873317a Guido Trotter
  REQ_BGL = False
2207 e873317a Guido Trotter
2208 e873317a Guido Trotter
  def ExpandNames(self):
2209 e873317a Guido Trotter
    self._ExpandAndLockInstance()
2210 e873317a Guido Trotter
    self.needed_locks[locking.LEVEL_NODE] = []
2211 f6d9a522 Guido Trotter
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
2212 e873317a Guido Trotter
2213 e873317a Guido Trotter
  def DeclareLocks(self, level):
2214 e873317a Guido Trotter
    if level == locking.LEVEL_NODE:
2215 e873317a Guido Trotter
      self._LockInstancesNodes()
2216 a8083063 Iustin Pop
2217 a8083063 Iustin Pop
  def BuildHooksEnv(self):
2218 a8083063 Iustin Pop
    """Build hooks env.
2219 a8083063 Iustin Pop

2220 a8083063 Iustin Pop
    This runs on master, primary and secondary nodes of the instance.
2221 a8083063 Iustin Pop

2222 a8083063 Iustin Pop
    """
2223 a8083063 Iustin Pop
    env = {
2224 a8083063 Iustin Pop
      "FORCE": self.op.force,
2225 a8083063 Iustin Pop
      }
2226 338e51e8 Iustin Pop
    env.update(_BuildInstanceHookEnvByObject(self, self.instance))
2227 d6a02168 Michael Hanselmann
    nl = ([self.cfg.GetMasterNode(), self.instance.primary_node] +
2228 a8083063 Iustin Pop
          list(self.instance.secondary_nodes))
2229 a8083063 Iustin Pop
    return env, nl, nl
2230 a8083063 Iustin Pop
2231 a8083063 Iustin Pop
  def CheckPrereq(self):
2232 a8083063 Iustin Pop
    """Check prerequisites.
2233 a8083063 Iustin Pop

2234 a8083063 Iustin Pop
    This checks that the instance is in the cluster.
2235 a8083063 Iustin Pop

2236 a8083063 Iustin Pop
    """
2237 e873317a Guido Trotter
    self.instance = instance = self.cfg.GetInstanceInfo(self.op.instance_name)
2238 e873317a Guido Trotter
    assert self.instance is not None, \
2239 e873317a Guido Trotter
      "Cannot retrieve locked instance %s" % self.op.instance_name
2240 a8083063 Iustin Pop
2241 338e51e8 Iustin Pop
    bep = self.cfg.GetClusterInfo().FillBE(instance)
2242 a8083063 Iustin Pop
    # check bridges existance
2243 b9bddb6b Iustin Pop
    _CheckInstanceBridgesExist(self, instance)
2244 a8083063 Iustin Pop
2245 b9bddb6b Iustin Pop
    _CheckNodeFreeMemory(self, instance.primary_node,
2246 d4f16fd9 Iustin Pop
                         "starting instance %s" % instance.name,
2247 338e51e8 Iustin Pop
                         bep[constants.BE_MEMORY], instance.hypervisor)
2248 d4f16fd9 Iustin Pop
2249 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
2250 a8083063 Iustin Pop
    """Start the instance.
2251 a8083063 Iustin Pop

2252 a8083063 Iustin Pop
    """
2253 a8083063 Iustin Pop
    instance = self.instance
2254 a8083063 Iustin Pop
    force = self.op.force
2255 a8083063 Iustin Pop
    extra_args = getattr(self.op, "extra_args", "")
2256 a8083063 Iustin Pop
2257 fe482621 Iustin Pop
    self.cfg.MarkInstanceUp(instance.name)
2258 fe482621 Iustin Pop
2259 a8083063 Iustin Pop
    node_current = instance.primary_node
2260 a8083063 Iustin Pop
2261 b9bddb6b Iustin Pop
    _StartInstanceDisks(self, instance, force)
2262 a8083063 Iustin Pop
2263 72737a7f Iustin Pop
    if not self.rpc.call_instance_start(node_current, instance, extra_args):
2264 b9bddb6b Iustin Pop
      _ShutdownInstanceDisks(self, instance)
2265 3ecf6786 Iustin Pop
      raise errors.OpExecError("Could not start instance")
2266 a8083063 Iustin Pop
2267 a8083063 Iustin Pop
2268 bf6929a2 Alexander Schreiber
class LURebootInstance(LogicalUnit):
2269 bf6929a2 Alexander Schreiber
  """Reboot an instance.
2270 bf6929a2 Alexander Schreiber

2271 bf6929a2 Alexander Schreiber
  """
2272 bf6929a2 Alexander Schreiber
  HPATH = "instance-reboot"
2273 bf6929a2 Alexander Schreiber
  HTYPE = constants.HTYPE_INSTANCE
2274 bf6929a2 Alexander Schreiber
  _OP_REQP = ["instance_name", "ignore_secondaries", "reboot_type"]
2275 e873317a Guido Trotter
  REQ_BGL = False
2276 e873317a Guido Trotter
2277 e873317a Guido Trotter
  def ExpandNames(self):
2278 0fcc5db3 Guido Trotter
    if self.op.reboot_type not in [constants.INSTANCE_REBOOT_SOFT,
2279 0fcc5db3 Guido Trotter
                                   constants.INSTANCE_REBOOT_HARD,
2280 0fcc5db3 Guido Trotter
                                   constants.INSTANCE_REBOOT_FULL]:
2281 0fcc5db3 Guido Trotter
      raise errors.ParameterError("reboot type not in [%s, %s, %s]" %
2282 0fcc5db3 Guido Trotter
                                  (constants.INSTANCE_REBOOT_SOFT,
2283 0fcc5db3 Guido Trotter
                                   constants.INSTANCE_REBOOT_HARD,
2284 0fcc5db3 Guido Trotter
                                   constants.INSTANCE_REBOOT_FULL))
2285 e873317a Guido Trotter
    self._ExpandAndLockInstance()
2286 e873317a Guido Trotter
    self.needed_locks[locking.LEVEL_NODE] = []
2287 f6d9a522 Guido Trotter
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
2288 e873317a Guido Trotter
2289 e873317a Guido Trotter
  def DeclareLocks(self, level):
2290 e873317a Guido Trotter
    if level == locking.LEVEL_NODE:
2291 849da276 Guido Trotter
      primary_only = not constants.INSTANCE_REBOOT_FULL
2292 849da276 Guido Trotter
      self._LockInstancesNodes(primary_only=primary_only)
2293 bf6929a2 Alexander Schreiber
2294 bf6929a2 Alexander Schreiber
  def BuildHooksEnv(self):
2295 bf6929a2 Alexander Schreiber
    """Build hooks env.
2296 bf6929a2 Alexander Schreiber

2297 bf6929a2 Alexander Schreiber
    This runs on master, primary and secondary nodes of the instance.
2298 bf6929a2 Alexander Schreiber

2299 bf6929a2 Alexander Schreiber
    """
2300 bf6929a2 Alexander Schreiber
    env = {
2301 bf6929a2 Alexander Schreiber
      "IGNORE_SECONDARIES": self.op.ignore_secondaries,
2302 bf6929a2 Alexander Schreiber
      }
2303 338e51e8 Iustin Pop
    env.update(_BuildInstanceHookEnvByObject(self, self.instance))
2304 d6a02168 Michael Hanselmann
    nl = ([self.cfg.GetMasterNode(), self.instance.primary_node] +
2305 bf6929a2 Alexander Schreiber
          list(self.instance.secondary_nodes))
2306 bf6929a2 Alexander Schreiber
    return env, nl, nl
2307 bf6929a2 Alexander Schreiber
2308 bf6929a2 Alexander Schreiber
  def CheckPrereq(self):
2309 bf6929a2 Alexander Schreiber
    """Check prerequisites.
2310 bf6929a2 Alexander Schreiber

2311 bf6929a2 Alexander Schreiber
    This checks that the instance is in the cluster.
2312 bf6929a2 Alexander Schreiber

2313 bf6929a2 Alexander Schreiber
    """
2314 e873317a Guido Trotter
    self.instance = instance = self.cfg.GetInstanceInfo(self.op.instance_name)
2315 e873317a Guido Trotter
    assert self.instance is not None, \
2316 e873317a Guido Trotter
      "Cannot retrieve locked instance %s" % self.op.instance_name
2317 bf6929a2 Alexander Schreiber
2318 bf6929a2 Alexander Schreiber
    # check bridges existance
2319 b9bddb6b Iustin Pop
    _CheckInstanceBridgesExist(self, instance)
2320 bf6929a2 Alexander Schreiber
2321 bf6929a2 Alexander Schreiber
  def Exec(self, feedback_fn):
2322 bf6929a2 Alexander Schreiber
    """Reboot the instance.
2323 bf6929a2 Alexander Schreiber

2324 bf6929a2 Alexander Schreiber
    """
2325 bf6929a2 Alexander Schreiber
    instance = self.instance
2326 bf6929a2 Alexander Schreiber
    ignore_secondaries = self.op.ignore_secondaries
2327 bf6929a2 Alexander Schreiber
    reboot_type = self.op.reboot_type
2328 bf6929a2 Alexander Schreiber
    extra_args = getattr(self.op, "extra_args", "")
2329 bf6929a2 Alexander Schreiber
2330 bf6929a2 Alexander Schreiber
    node_current = instance.primary_node
2331 bf6929a2 Alexander Schreiber
2332 bf6929a2 Alexander Schreiber
    if reboot_type in [constants.INSTANCE_REBOOT_SOFT,
2333 bf6929a2 Alexander Schreiber
                       constants.INSTANCE_REBOOT_HARD]:
2334 72737a7f Iustin Pop
      if not self.rpc.call_instance_reboot(node_current, instance,
2335 72737a7f Iustin Pop
                                           reboot_type, extra_args):
2336 bf6929a2 Alexander Schreiber
        raise errors.OpExecError("Could not reboot instance")
2337 bf6929a2 Alexander Schreiber
    else:
2338 72737a7f Iustin Pop
      if not self.rpc.call_instance_shutdown(node_current, instance):
2339 bf6929a2 Alexander Schreiber
        raise errors.OpExecError("could not shutdown instance for full reboot")
2340 b9bddb6b Iustin Pop
      _ShutdownInstanceDisks(self, instance)
2341 b9bddb6b Iustin Pop
      _StartInstanceDisks(self, instance, ignore_secondaries)
2342 72737a7f Iustin Pop
      if not self.rpc.call_instance_start(node_current, instance, extra_args):
2343 b9bddb6b Iustin Pop
        _ShutdownInstanceDisks(self, instance)
2344 bf6929a2 Alexander Schreiber
        raise errors.OpExecError("Could not start instance for full reboot")
2345 bf6929a2 Alexander Schreiber
2346 bf6929a2 Alexander Schreiber
    self.cfg.MarkInstanceUp(instance.name)
2347 bf6929a2 Alexander Schreiber
2348 bf6929a2 Alexander Schreiber
2349 a8083063 Iustin Pop
class LUShutdownInstance(LogicalUnit):
2350 a8083063 Iustin Pop
  """Shutdown an instance.
2351 a8083063 Iustin Pop

2352 a8083063 Iustin Pop
  """
2353 a8083063 Iustin Pop
  HPATH = "instance-stop"
2354 a8083063 Iustin Pop
  HTYPE = constants.HTYPE_INSTANCE
2355 a8083063 Iustin Pop
  _OP_REQP = ["instance_name"]
2356 e873317a Guido Trotter
  REQ_BGL = False
2357 e873317a Guido Trotter
2358 e873317a Guido Trotter
  def ExpandNames(self):
2359 e873317a Guido Trotter
    self._ExpandAndLockInstance()
2360 e873317a Guido Trotter
    self.needed_locks[locking.LEVEL_NODE] = []
2361 f6d9a522 Guido Trotter
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
2362 e873317a Guido Trotter
2363 e873317a Guido Trotter
  def DeclareLocks(self, level):
2364 e873317a Guido Trotter
    if level == locking.LEVEL_NODE:
2365 e873317a Guido Trotter
      self._LockInstancesNodes()
2366 a8083063 Iustin Pop
2367 a8083063 Iustin Pop
  def BuildHooksEnv(self):
2368 a8083063 Iustin Pop
    """Build hooks env.
2369 a8083063 Iustin Pop

2370 a8083063 Iustin Pop
    This runs on master, primary and secondary nodes of the instance.
2371 a8083063 Iustin Pop

2372 a8083063 Iustin Pop
    """
2373 338e51e8 Iustin Pop
    env = _BuildInstanceHookEnvByObject(self, self.instance)
2374 d6a02168 Michael Hanselmann
    nl = ([self.cfg.GetMasterNode(), self.instance.primary_node] +
2375 a8083063 Iustin Pop
          list(self.instance.secondary_nodes))
2376 a8083063 Iustin Pop
    return env, nl, nl
2377 a8083063 Iustin Pop
2378 a8083063 Iustin Pop
  def CheckPrereq(self):
2379 a8083063 Iustin Pop
    """Check prerequisites.
2380 a8083063 Iustin Pop

2381 a8083063 Iustin Pop
    This checks that the instance is in the cluster.
2382 a8083063 Iustin Pop

2383 a8083063 Iustin Pop
    """
2384 e873317a Guido Trotter
    self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
2385 e873317a Guido Trotter
    assert self.instance is not None, \
2386 e873317a Guido Trotter
      "Cannot retrieve locked instance %s" % self.op.instance_name
2387 a8083063 Iustin Pop
2388 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
2389 a8083063 Iustin Pop
    """Shutdown the instance.
2390 a8083063 Iustin Pop

2391 a8083063 Iustin Pop
    """
2392 a8083063 Iustin Pop
    instance = self.instance
2393 a8083063 Iustin Pop
    node_current = instance.primary_node
2394 fe482621 Iustin Pop
    self.cfg.MarkInstanceDown(instance.name)
2395 72737a7f Iustin Pop
    if not self.rpc.call_instance_shutdown(node_current, instance):
2396 86d9d3bb Iustin Pop
      self.proc.LogWarning("Could not shutdown instance")
2397 a8083063 Iustin Pop
2398 b9bddb6b Iustin Pop
    _ShutdownInstanceDisks(self, instance)
2399 a8083063 Iustin Pop
2400 a8083063 Iustin Pop
2401 fe7b0351 Michael Hanselmann
class LUReinstallInstance(LogicalUnit):
2402 fe7b0351 Michael Hanselmann
  """Reinstall an instance.
2403 fe7b0351 Michael Hanselmann

2404 fe7b0351 Michael Hanselmann
  """
2405 fe7b0351 Michael Hanselmann
  HPATH = "instance-reinstall"
2406 fe7b0351 Michael Hanselmann
  HTYPE = constants.HTYPE_INSTANCE
2407 fe7b0351 Michael Hanselmann
  _OP_REQP = ["instance_name"]
2408 4e0b4d2d Guido Trotter
  REQ_BGL = False
2409 4e0b4d2d Guido Trotter
2410 4e0b4d2d Guido Trotter
  def ExpandNames(self):
2411 4e0b4d2d Guido Trotter
    self._ExpandAndLockInstance()
2412 4e0b4d2d Guido Trotter
    self.needed_locks[locking.LEVEL_NODE] = []
2413 f6d9a522 Guido Trotter
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
2414 4e0b4d2d Guido Trotter
2415 4e0b4d2d Guido Trotter
  def DeclareLocks(self, level):
2416 4e0b4d2d Guido Trotter
    if level == locking.LEVEL_NODE:
2417 4e0b4d2d Guido Trotter
      self._LockInstancesNodes()
2418 fe7b0351 Michael Hanselmann
2419 fe7b0351 Michael Hanselmann
  def BuildHooksEnv(self):
2420 fe7b0351 Michael Hanselmann
    """Build hooks env.
2421 fe7b0351 Michael Hanselmann

2422 fe7b0351 Michael Hanselmann
    This runs on master, primary and secondary nodes of the instance.
2423 fe7b0351 Michael Hanselmann

2424 fe7b0351 Michael Hanselmann
    """
2425 338e51e8 Iustin Pop
    env = _BuildInstanceHookEnvByObject(self, self.instance)
2426 d6a02168 Michael Hanselmann
    nl = ([self.cfg.GetMasterNode(), self.instance.primary_node] +
2427 fe7b0351 Michael Hanselmann
          list(self.instance.secondary_nodes))
2428 fe7b0351 Michael Hanselmann
    return env, nl, nl
2429 fe7b0351 Michael Hanselmann
2430 fe7b0351 Michael Hanselmann
  def CheckPrereq(self):
2431 fe7b0351 Michael Hanselmann
    """Check prerequisites.
2432 fe7b0351 Michael Hanselmann

2433 fe7b0351 Michael Hanselmann
    This checks that the instance is in the cluster and is not running.
2434 fe7b0351 Michael Hanselmann

2435 fe7b0351 Michael Hanselmann
    """
2436 4e0b4d2d Guido Trotter
    instance = self.cfg.GetInstanceInfo(self.op.instance_name)
2437 4e0b4d2d Guido Trotter
    assert instance is not None, \
2438 4e0b4d2d Guido Trotter
      "Cannot retrieve locked instance %s" % self.op.instance_name
2439 4e0b4d2d Guido Trotter
2440 fe7b0351 Michael Hanselmann
    if instance.disk_template == constants.DT_DISKLESS:
2441 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("Instance '%s' has no disks" %
2442 3ecf6786 Iustin Pop
                                 self.op.instance_name)
2443 fe7b0351 Michael Hanselmann
    if instance.status != "down":
2444 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("Instance '%s' is marked to be up" %
2445 3ecf6786 Iustin Pop
                                 self.op.instance_name)
2446 72737a7f Iustin Pop
    remote_info = self.rpc.call_instance_info(instance.primary_node,
2447 72737a7f Iustin Pop
                                              instance.name,
2448 72737a7f Iustin Pop
                                              instance.hypervisor)
2449 fe7b0351 Michael Hanselmann
    if remote_info:
2450 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("Instance '%s' is running on the node %s" %
2451 3ecf6786 Iustin Pop
                                 (self.op.instance_name,
2452 3ecf6786 Iustin Pop
                                  instance.primary_node))
2453 d0834de3 Michael Hanselmann
2454 d0834de3 Michael Hanselmann
    self.op.os_type = getattr(self.op, "os_type", None)
2455 d0834de3 Michael Hanselmann
    if self.op.os_type is not None:
2456 d0834de3 Michael Hanselmann
      # OS verification
2457 d0834de3 Michael Hanselmann
      pnode = self.cfg.GetNodeInfo(
2458 d0834de3 Michael Hanselmann
        self.cfg.ExpandNodeName(instance.primary_node))
2459 d0834de3 Michael Hanselmann
      if pnode is None:
2460 3ecf6786 Iustin Pop
        raise errors.OpPrereqError("Primary node '%s' is unknown" %
2461 3ecf6786 Iustin Pop
                                   self.op.pnode)
2462 72737a7f Iustin Pop
      os_obj = self.rpc.call_os_get(pnode.name, self.op.os_type)
2463 dfa96ded Guido Trotter
      if not os_obj:
2464 3ecf6786 Iustin Pop
        raise errors.OpPrereqError("OS '%s' not in supported OS list for"
2465 3ecf6786 Iustin Pop
                                   " primary node"  % self.op.os_type)
2466 d0834de3 Michael Hanselmann
2467 fe7b0351 Michael Hanselmann
    self.instance = instance
2468 fe7b0351 Michael Hanselmann
2469 fe7b0351 Michael Hanselmann
  def Exec(self, feedback_fn):
2470 fe7b0351 Michael Hanselmann
    """Reinstall the instance.
2471 fe7b0351 Michael Hanselmann

2472 fe7b0351 Michael Hanselmann
    """
2473 fe7b0351 Michael Hanselmann
    inst = self.instance
2474 fe7b0351 Michael Hanselmann
2475 d0834de3 Michael Hanselmann
    if self.op.os_type is not None:
2476 d0834de3 Michael Hanselmann
      feedback_fn("Changing OS to '%s'..." % self.op.os_type)
2477 d0834de3 Michael Hanselmann
      inst.os = self.op.os_type
2478 97abc79f Iustin Pop
      self.cfg.Update(inst)
2479 d0834de3 Michael Hanselmann
2480 b9bddb6b Iustin Pop
    _StartInstanceDisks(self, inst, None)
2481 fe7b0351 Michael Hanselmann
    try:
2482 fe7b0351 Michael Hanselmann
      feedback_fn("Running the instance OS create scripts...")
2483 72737a7f Iustin Pop
      if not self.rpc.call_instance_os_add(inst.primary_node, inst,
2484 72737a7f Iustin Pop
                                           "sda", "sdb"):
2485 f4bc1f2c Michael Hanselmann
        raise errors.OpExecError("Could not install OS for instance %s"
2486 f4bc1f2c Michael Hanselmann
                                 " on node %s" %
2487 3ecf6786 Iustin Pop
                                 (inst.name, inst.primary_node))
2488 fe7b0351 Michael Hanselmann
    finally:
2489 b9bddb6b Iustin Pop
      _ShutdownInstanceDisks(self, inst)
2490 fe7b0351 Michael Hanselmann
2491 fe7b0351 Michael Hanselmann
2492 decd5f45 Iustin Pop
class LURenameInstance(LogicalUnit):
2493 decd5f45 Iustin Pop
  """Rename an instance.
2494 decd5f45 Iustin Pop

2495 decd5f45 Iustin Pop
  """
2496 decd5f45 Iustin Pop
  HPATH = "instance-rename"
2497 decd5f45 Iustin Pop
  HTYPE = constants.HTYPE_INSTANCE
2498 decd5f45 Iustin Pop
  _OP_REQP = ["instance_name", "new_name"]
2499 decd5f45 Iustin Pop
2500 decd5f45 Iustin Pop
  def BuildHooksEnv(self):
2501 decd5f45 Iustin Pop
    """Build hooks env.
2502 decd5f45 Iustin Pop

2503 decd5f45 Iustin Pop
    This runs on master, primary and secondary nodes of the instance.
2504 decd5f45 Iustin Pop

2505 decd5f45 Iustin Pop
    """
2506 338e51e8 Iustin Pop
    env = _BuildInstanceHookEnvByObject(self, self.instance)
2507 decd5f45 Iustin Pop
    env["INSTANCE_NEW_NAME"] = self.op.new_name
2508 d6a02168 Michael Hanselmann
    nl = ([self.cfg.GetMasterNode(), self.instance.primary_node] +
2509 decd5f45 Iustin Pop
          list(self.instance.secondary_nodes))
2510 decd5f45 Iustin Pop
    return env, nl, nl
2511 decd5f45 Iustin Pop
2512 decd5f45 Iustin Pop
  def CheckPrereq(self):
2513 decd5f45 Iustin Pop
    """Check prerequisites.
2514 decd5f45 Iustin Pop

2515 decd5f45 Iustin Pop
    This checks that the instance is in the cluster and is not running.
2516 decd5f45 Iustin Pop

2517 decd5f45 Iustin Pop
    """
2518 decd5f45 Iustin Pop
    instance = self.cfg.GetInstanceInfo(
2519 decd5f45 Iustin Pop
      self.cfg.ExpandInstanceName(self.op.instance_name))
2520 decd5f45 Iustin Pop
    if instance is None:
2521 decd5f45 Iustin Pop
      raise errors.OpPrereqError("Instance '%s' not known" %
2522 decd5f45 Iustin Pop
                                 self.op.instance_name)
2523 decd5f45 Iustin Pop
    if instance.status != "down":
2524 decd5f45 Iustin Pop
      raise errors.OpPrereqError("Instance '%s' is marked to be up" %
2525 decd5f45 Iustin Pop
                                 self.op.instance_name)
2526 72737a7f Iustin Pop
    remote_info = self.rpc.call_instance_info(instance.primary_node,
2527 72737a7f Iustin Pop
                                              instance.name,
2528 72737a7f Iustin Pop
                                              instance.hypervisor)
2529 decd5f45 Iustin Pop
    if remote_info:
2530 decd5f45 Iustin Pop
      raise errors.OpPrereqError("Instance '%s' is running on the node %s" %
2531 decd5f45 Iustin Pop
                                 (self.op.instance_name,
2532 decd5f45 Iustin Pop
                                  instance.primary_node))
2533 decd5f45 Iustin Pop
    self.instance = instance
2534 decd5f45 Iustin Pop
2535 decd5f45 Iustin Pop
    # new name verification
2536 89e1fc26 Iustin Pop
    name_info = utils.HostInfo(self.op.new_name)
2537 decd5f45 Iustin Pop
2538 89e1fc26 Iustin Pop
    self.op.new_name = new_name = name_info.name
2539 7bde3275 Guido Trotter
    instance_list = self.cfg.GetInstanceList()
2540 7bde3275 Guido Trotter
    if new_name in instance_list:
2541 7bde3275 Guido Trotter
      raise errors.OpPrereqError("Instance '%s' is already in the cluster" %
2542 c09f363f Manuel Franceschini
                                 new_name)
2543 7bde3275 Guido Trotter
2544 decd5f45 Iustin Pop
    if not getattr(self.op, "ignore_ip", False):
2545 937f983d Guido Trotter
      if utils.TcpPing(name_info.ip, constants.DEFAULT_NODED_PORT):
2546 decd5f45 Iustin Pop
        raise errors.OpPrereqError("IP %s of instance %s already in use" %
2547 89e1fc26 Iustin Pop
                                   (name_info.ip, new_name))
2548 decd5f45 Iustin Pop
2549 decd5f45 Iustin Pop
2550 decd5f45 Iustin Pop
  def Exec(self, feedback_fn):
2551 decd5f45 Iustin Pop
    """Reinstall the instance.
2552 decd5f45 Iustin Pop

2553 decd5f45 Iustin Pop
    """
2554 decd5f45 Iustin Pop
    inst = self.instance
2555 decd5f45 Iustin Pop
    old_name = inst.name
2556 decd5f45 Iustin Pop
2557 b23c4333 Manuel Franceschini
    if inst.disk_template == constants.DT_FILE:
2558 b23c4333 Manuel Franceschini
      old_file_storage_dir = os.path.dirname(inst.disks[0].logical_id[1])
2559 b23c4333 Manuel Franceschini
2560 decd5f45 Iustin Pop
    self.cfg.RenameInstance(inst.name, self.op.new_name)
2561 74b5913f Guido Trotter
    # Change the instance lock. This is definitely safe while we hold the BGL
2562 74b5913f Guido Trotter
    self.context.glm.remove(locking.LEVEL_INSTANCE, inst.name)
2563 74b5913f Guido Trotter
    self.context.glm.add(locking.LEVEL_INSTANCE, self.op.new_name)
2564 decd5f45 Iustin Pop
2565 decd5f45 Iustin Pop
    # re-read the instance from the configuration after rename
2566 decd5f45 Iustin Pop
    inst = self.cfg.GetInstanceInfo(self.op.new_name)
2567 decd5f45 Iustin Pop
2568 b23c4333 Manuel Franceschini
    if inst.disk_template == constants.DT_FILE:
2569 b23c4333 Manuel Franceschini
      new_file_storage_dir = os.path.dirname(inst.disks[0].logical_id[1])
2570 72737a7f Iustin Pop
      result = self.rpc.call_file_storage_dir_rename(inst.primary_node,
2571 72737a7f Iustin Pop
                                                     old_file_storage_dir,
2572 72737a7f Iustin Pop
                                                     new_file_storage_dir)
2573 b23c4333 Manuel Franceschini
2574 b23c4333 Manuel Franceschini
      if not result:
2575 b23c4333 Manuel Franceschini
        raise errors.OpExecError("Could not connect to node '%s' to rename"
2576 b23c4333 Manuel Franceschini
                                 " directory '%s' to '%s' (but the instance"
2577 b23c4333 Manuel Franceschini
                                 " has been renamed in Ganeti)" % (
2578 b23c4333 Manuel Franceschini
                                 inst.primary_node, old_file_storage_dir,
2579 b23c4333 Manuel Franceschini
                                 new_file_storage_dir))
2580 b23c4333 Manuel Franceschini
2581 b23c4333 Manuel Franceschini
      if not result[0]:
2582 b23c4333 Manuel Franceschini
        raise errors.OpExecError("Could not rename directory '%s' to '%s'"
2583 b23c4333 Manuel Franceschini
                                 " (but the instance has been renamed in"
2584 b23c4333 Manuel Franceschini
                                 " Ganeti)" % (old_file_storage_dir,
2585 b23c4333 Manuel Franceschini
                                               new_file_storage_dir))
2586 b23c4333 Manuel Franceschini
2587 b9bddb6b Iustin Pop
    _StartInstanceDisks(self, inst, None)
2588 decd5f45 Iustin Pop
    try:
2589 72737a7f Iustin Pop
      if not self.rpc.call_instance_run_rename(inst.primary_node, inst,
2590 d15a9ad3 Guido Trotter
                                               old_name):
2591 6291574d Alexander Schreiber
        msg = ("Could not run OS rename script for instance %s on node %s"
2592 6291574d Alexander Schreiber
               " (but the instance has been renamed in Ganeti)" %
2593 decd5f45 Iustin Pop
               (inst.name, inst.primary_node))
2594 86d9d3bb Iustin Pop
        self.proc.LogWarning(msg)
2595 decd5f45 Iustin Pop
    finally:
2596 b9bddb6b Iustin Pop
      _ShutdownInstanceDisks(self, inst)
2597 decd5f45 Iustin Pop
2598 decd5f45 Iustin Pop
2599 a8083063 Iustin Pop
class LURemoveInstance(LogicalUnit):
2600 a8083063 Iustin Pop
  """Remove an instance.
2601 a8083063 Iustin Pop

2602 a8083063 Iustin Pop
  """
2603 a8083063 Iustin Pop
  HPATH = "instance-remove"
2604 a8083063 Iustin Pop
  HTYPE = constants.HTYPE_INSTANCE
2605 5c54b832 Iustin Pop
  _OP_REQP = ["instance_name", "ignore_failures"]
2606 cf472233 Guido Trotter
  REQ_BGL = False
2607 cf472233 Guido Trotter
2608 cf472233 Guido Trotter
  def ExpandNames(self):
2609 cf472233 Guido Trotter
    self._ExpandAndLockInstance()
2610 cf472233 Guido Trotter
    self.needed_locks[locking.LEVEL_NODE] = []
2611 cf472233 Guido Trotter
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
2612 cf472233 Guido Trotter
2613 cf472233 Guido Trotter
  def DeclareLocks(self, level):
2614 cf472233 Guido Trotter
    if level == locking.LEVEL_NODE:
2615 cf472233 Guido Trotter
      self._LockInstancesNodes()
2616 a8083063 Iustin Pop
2617 a8083063 Iustin Pop
  def BuildHooksEnv(self):
2618 a8083063 Iustin Pop
    """Build hooks env.
2619 a8083063 Iustin Pop

2620 a8083063 Iustin Pop
    This runs on master, primary and secondary nodes of the instance.
2621 a8083063 Iustin Pop

2622 a8083063 Iustin Pop
    """
2623 338e51e8 Iustin Pop
    env = _BuildInstanceHookEnvByObject(self, self.instance)
2624 d6a02168 Michael Hanselmann
    nl = [self.cfg.GetMasterNode()]
2625 a8083063 Iustin Pop
    return env, nl, nl
2626 a8083063 Iustin Pop
2627 a8083063 Iustin Pop
  def CheckPrereq(self):
2628 a8083063 Iustin Pop
    """Check prerequisites.
2629 a8083063 Iustin Pop

2630 a8083063 Iustin Pop
    This checks that the instance is in the cluster.
2631 a8083063 Iustin Pop

2632 a8083063 Iustin Pop
    """
2633 cf472233 Guido Trotter
    self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
2634 cf472233 Guido Trotter
    assert self.instance is not None, \
2635 cf472233 Guido Trotter
      "Cannot retrieve locked instance %s" % self.op.instance_name
2636 a8083063 Iustin Pop
2637 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
2638 a8083063 Iustin Pop
    """Remove the instance.
2639 a8083063 Iustin Pop

2640 a8083063 Iustin Pop
    """
2641 a8083063 Iustin Pop
    instance = self.instance
2642 9a4f63d1 Iustin Pop
    logging.info("Shutting down instance %s on node %s",
2643 9a4f63d1 Iustin Pop
                 instance.name, instance.primary_node)
2644 a8083063 Iustin Pop
2645 72737a7f Iustin Pop
    if not self.rpc.call_instance_shutdown(instance.primary_node, instance):
2646 1d67656e Iustin Pop
      if self.op.ignore_failures:
2647 1d67656e Iustin Pop
        feedback_fn("Warning: can't shutdown instance")
2648 1d67656e Iustin Pop
      else:
2649 1d67656e Iustin Pop
        raise errors.OpExecError("Could not shutdown instance %s on node %s" %
2650 1d67656e Iustin Pop
                                 (instance.name, instance.primary_node))
2651 a8083063 Iustin Pop
2652 9a4f63d1 Iustin Pop
    logging.info("Removing block devices for instance %s", instance.name)
2653 a8083063 Iustin Pop
2654 b9bddb6b Iustin Pop
    if not _RemoveDisks(self, instance):
2655 1d67656e Iustin Pop
      if self.op.ignore_failures:
2656 1d67656e Iustin Pop
        feedback_fn("Warning: can't remove instance's disks")
2657 1d67656e Iustin Pop
      else:
2658 1d67656e Iustin Pop
        raise errors.OpExecError("Can't remove instance's disks")
2659 a8083063 Iustin Pop
2660 9a4f63d1 Iustin Pop
    logging.info("Removing instance %s out of cluster config", instance.name)
2661 a8083063 Iustin Pop
2662 a8083063 Iustin Pop
    self.cfg.RemoveInstance(instance.name)
2663 cf472233 Guido Trotter
    self.remove_locks[locking.LEVEL_INSTANCE] = instance.name
2664 a8083063 Iustin Pop
2665 a8083063 Iustin Pop
2666 a8083063 Iustin Pop
class LUQueryInstances(NoHooksLU):
  """Logical unit for querying instances.

  Supports both "static" fields (answerable from the configuration
  alone) and "dynamic" fields (requiring a live RPC round-trip to the
  nodes).  Locking is only performed when at least one non-static field
  is requested.

  """
  _OP_REQP = ["output_fields", "names"]
  REQ_BGL = False

  def ExpandNames(self):
    # fields that require live data from the nodes
    self.dynamic_fields = frozenset(["oper_state", "oper_ram", "status"])
    # per-parameter pseudo-fields, e.g. "hv/kernel_path", "be/memory"
    hvp = ["hv/%s" % name for name in constants.HVS_PARAMETERS]
    bep = ["be/%s" % name for name in constants.BES_PARAMETERS]
    self.static_fields = frozenset([
      "name", "os", "pnode", "snodes",
      "admin_state", "admin_ram",
      "disk_template", "ip", "mac", "bridge",
      "sda_size", "sdb_size", "vcpus", "tags",
      "network_port", "beparams",
      "serial_no", "hypervisor", "hvparams",
      ] + hvp + bep)

    _CheckOutputFields(static=self.static_fields,
                       dynamic=self.dynamic_fields,
                       selected=self.op.output_fields)

    # queries are read-only, so shared locks suffice
    self.needed_locks = {}
    self.share_locks[locking.LEVEL_INSTANCE] = 1
    self.share_locks[locking.LEVEL_NODE] = 1

    if self.op.names:
      self.wanted = _GetWantedInstances(self, self.op.names)
    else:
      self.wanted = locking.ALL_SET

    # lock only if some requested field cannot be served from the
    # configuration alone
    self.do_locking = not self.static_fields.issuperset(self.op.output_fields)
    if self.do_locking:
      self.needed_locks[locking.LEVEL_INSTANCE] = self.wanted
      self.needed_locks[locking.LEVEL_NODE] = []
      self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE

  def DeclareLocks(self, level):
    # node locks are derived from the instance locks acquired earlier
    if level == locking.LEVEL_NODE and self.do_locking:
      self._LockInstancesNodes()

  def CheckPrereq(self):
    """Check prerequisites.

    """
    pass

  def Exec(self, feedback_fn):
    """Computes the list of nodes and their attributes.

    @return: a list of rows (one per instance), each row being the list
        of values for the requested output fields, in order

    """
    all_info = self.cfg.GetAllInstancesInfo()
    if self.do_locking:
      # the set of instances is exactly what we managed to lock
      instance_names = self.acquired_locks[locking.LEVEL_INSTANCE]
    elif self.wanted != locking.ALL_SET:
      # no locking: the named instances may have disappeared meanwhile
      instance_names = self.wanted
      missing = set(instance_names).difference(all_info.keys())
      if missing:
        raise errors.OpExecError(
          "Some instances were removed before retrieving their data: %s"
          % missing)
    else:
      instance_names = all_info.keys()

    instance_names = utils.NiceSort(instance_names)
    instance_list = [all_info[iname] for iname in instance_names]

    # begin data gathering

    nodes = frozenset([inst.primary_node for inst in instance_list])
    hv_list = list(set([inst.hypervisor for inst in instance_list]))

    bad_nodes = []
    if self.dynamic_fields.intersection(self.op.output_fields):
      live_data = {}
      node_data = self.rpc.call_all_instances_info(nodes, hv_list)
      for name in nodes:
        result = node_data[name]
        if result:
          live_data.update(result)
        elif result == False:
          # an explicit False means the RPC to that node failed, which
          # is different from "no instance running there" (empty result)
          bad_nodes.append(name)
        # else no instance is alive
    else:
      live_data = dict([(name, {}) for name in instance_names])

    # end data gathering

    HVPREFIX = "hv/"
    BEPREFIX = "be/"
    output = []
    for instance in instance_list:
      iout = []
      # parameters with cluster defaults filled in
      i_hv = self.cfg.GetClusterInfo().FillHV(instance)
      i_be = self.cfg.GetClusterInfo().FillBE(instance)
      for field in self.op.output_fields:
        if field == "name":
          val = instance.name
        elif field == "os":
          val = instance.os
        elif field == "pnode":
          val = instance.primary_node
        elif field == "snodes":
          val = list(instance.secondary_nodes)
        elif field == "admin_state":
          val = (instance.status != "down")
        elif field == "oper_state":
          # None means "unknown" (node unreachable)
          if instance.primary_node in bad_nodes:
            val = None
          else:
            val = bool(live_data.get(instance.name))
        elif field == "status":
          # combined admin/operational status
          if instance.primary_node in bad_nodes:
            val = "ERROR_nodedown"
          else:
            running = bool(live_data.get(instance.name))
            if running:
              if instance.status != "down":
                val = "running"
              else:
                val = "ERROR_up"
            else:
              if instance.status != "down":
                val = "ERROR_down"
              else:
                val = "ADMIN_down"
        elif field == "oper_ram":
          if instance.primary_node in bad_nodes:
            val = None
          elif instance.name in live_data:
            val = live_data[instance.name].get("memory", "?")
          else:
            val = "-"
        elif field == "disk_template":
          val = instance.disk_template
        elif field == "ip":
          val = instance.nics[0].ip
        elif field == "bridge":
          val = instance.nics[0].bridge
        elif field == "mac":
          val = instance.nics[0].mac
        elif field == "sda_size" or field == "sdb_size":
          # field[:3] is "sda" or "sdb"
          disk = instance.FindDisk(field[:3])
          if disk is None:
            val = None
          else:
            val = disk.size
        elif field == "tags":
          val = list(instance.GetTags())
        elif field == "serial_no":
          val = instance.serial_no
        elif field == "network_port":
          val = instance.network_port
        elif field == "hypervisor":
          val = instance.hypervisor
        elif field == "hvparams":
          val = i_hv
        elif (field.startswith(HVPREFIX) and
              field[len(HVPREFIX):] in constants.HVS_PARAMETERS):
          # individual hypervisor parameter, e.g. "hv/kernel_path"
          val = i_hv.get(field[len(HVPREFIX):], None)
        elif field == "beparams":
          val = i_be
        elif (field.startswith(BEPREFIX) and
              field[len(BEPREFIX):] in constants.BES_PARAMETERS):
          # individual backend parameter, e.g. "be/memory"
          val = i_be.get(field[len(BEPREFIX):], None)
        else:
          raise errors.ParameterError(field)
        iout.append(val)
      output.append(iout)

    return output
class LUFailoverInstance(LogicalUnit):
  """Failover an instance.

  Moves an instance from its primary node to its (single) secondary by
  shutting it down on the primary and restarting it on the secondary.
  Only works for network-mirrored (DRBD) disk templates.

  """
  HPATH = "instance-failover"
  HTYPE = constants.HTYPE_INSTANCE
  _OP_REQP = ["instance_name", "ignore_consistency"]
  REQ_BGL = False

  def ExpandNames(self):
    self._ExpandAndLockInstance()
    # node locks are computed later from the instance's node list
    self.needed_locks[locking.LEVEL_NODE] = []
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE

  def DeclareLocks(self, level):
    if level == locking.LEVEL_NODE:
      self._LockInstancesNodes()

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and secondary nodes of the instance.

    """
    env = {
      "IGNORE_CONSISTENCY": self.op.ignore_consistency,
      }
    env.update(_BuildInstanceHookEnvByObject(self, self.instance))
    nl = [self.cfg.GetMasterNode()] + list(self.instance.secondary_nodes)
    return env, nl, nl

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster, uses a mirrored
    disk template, and that the target node has enough memory and the
    required bridges.

    """
    self.instance = instance = self.cfg.GetInstanceInfo(self.op.instance_name)
    assert self.instance is not None, \
      "Cannot retrieve locked instance %s" % self.op.instance_name

    bep = self.cfg.GetClusterInfo().FillBE(instance)
    if instance.disk_template not in constants.DTS_NET_MIRROR:
      raise errors.OpPrereqError("Instance's disk layout is not"
                                 " network mirrored, cannot failover.")

    secondary_nodes = instance.secondary_nodes
    if not secondary_nodes:
      raise errors.ProgrammerError("no secondary node but using "
                                   "a mirrored disk template")

    target_node = secondary_nodes[0]
    # check memory requirements on the secondary node
    _CheckNodeFreeMemory(self, target_node, "failing over instance %s" %
                         instance.name, bep[constants.BE_MEMORY],
                         instance.hypervisor)

    # check bridge existance
    brlist = [nic.bridge for nic in instance.nics]
    if not self.rpc.call_bridges_exist(target_node, brlist):
      raise errors.OpPrereqError("One or more target bridges %s does not"
                                 " exist on destination node '%s'" %
                                 (brlist, target_node))

  def Exec(self, feedback_fn):
    """Failover an instance.

    The failover is done by shutting it down on its present node and
    starting it on the secondary.

    """
    instance = self.instance

    source_node = instance.primary_node
    target_node = instance.secondary_nodes[0]

    feedback_fn("* checking disk consistency between source and target")
    for dev in instance.disks:
      # for drbd, these are drbd over lvm
      if not _CheckDiskConsistency(self, dev, target_node, False):
        # a degraded disk aborts the failover only for a running
        # instance, unless the user explicitly ignores consistency
        if instance.status == "up" and not self.op.ignore_consistency:
          raise errors.OpExecError("Disk %s is degraded on target node,"
                                   " aborting failover." % dev.iv_name)

    feedback_fn("* shutting down instance on source node")
    logging.info("Shutting down instance %s on node %s",
                 instance.name, source_node)

    if not self.rpc.call_instance_shutdown(source_node, instance):
      if self.op.ignore_consistency:
        # best effort: carry on even if the source node is unreachable
        self.proc.LogWarning("Could not shutdown instance %s on node %s."
                             " Proceeding"
                             " anyway. Please make sure node %s is down",
                             instance.name, source_node, source_node)
      else:
        raise errors.OpExecError("Could not shutdown instance %s on node %s" %
                                 (instance.name, source_node))

    feedback_fn("* deactivating the instance's disks on source node")
    if not _ShutdownInstanceDisks(self, instance, ignore_primary=True):
      raise errors.OpExecError("Can't shut down the instance's disks.")

    instance.primary_node = target_node
    # distribute new instance config to the other nodes
    self.cfg.Update(instance)

    # Only start the instance if it's marked as up
    if instance.status == "up":
      feedback_fn("* activating the instance's disks on target node")
      logging.info("Starting instance %s on node %s",
                   instance.name, target_node)

      disks_ok, dummy = _AssembleInstanceDisks(self, instance,
                                               ignore_secondaries=True)
      if not disks_ok:
        # roll back the disk activation before aborting
        _ShutdownInstanceDisks(self, instance)
        raise errors.OpExecError("Can't activate the instance's disks")

      feedback_fn("* starting the instance on the target node")
      if not self.rpc.call_instance_start(target_node, instance, None):
        _ShutdownInstanceDisks(self, instance)
        raise errors.OpExecError("Could not start instance %s on node %s." %
                                 (instance.name, target_node))
def _CreateBlockDevOnPrimary(lu, node, instance, device, info):
  """Recursively create a tree of block devices on the primary node.

  Children are created first (bottom-up), and every device in the tree
  is created unconditionally.  Returns True on full success, or False
  as soon as any creation fails.

  """
  for child in (device.children or []):
    if not _CreateBlockDevOnPrimary(lu, node, instance, child, info):
      return False

  lu.cfg.SetDiskID(device, node)
  result = lu.rpc.call_blockdev_create(node, device, device.size,
                                       instance.name, True, info)
  if not result:
    return False
  # record the physical id reported by the node, unless already known
  if device.physical_id is None:
    device.physical_id = result
  return True
def _CreateBlockDevOnSecondary(lu, node, instance, device, force, info):
  """Recursively create a tree of block devices on a secondary node.

  A device is physically created only when it (or one of its ancestors)
  reports CreateOnSecondary(); otherwise the recursion simply forwards
  the current 'force' value to the children.  Returns True on success,
  False as soon as any required creation fails.

  """
  if device.CreateOnSecondary():
    force = True

  for child in (device.children or []):
    if not _CreateBlockDevOnSecondary(lu, node, instance,
                                      child, force, info):
      return False

  if not force:
    # nothing to create at this level
    return True

  lu.cfg.SetDiskID(device, node)
  result = lu.rpc.call_blockdev_create(node, device, device.size,
                                       instance.name, False, info)
  if not result:
    return False
  # record the physical id reported by the node, unless already known
  if device.physical_id is None:
    device.physical_id = result
  return True
def _GenerateUniqueNames(lu, exts):
3017 923b1523 Iustin Pop
  """Generate a suitable LV name.
3018 923b1523 Iustin Pop

3019 923b1523 Iustin Pop
  This will generate a logical volume name for the given instance.
3020 923b1523 Iustin Pop

3021 923b1523 Iustin Pop
  """
3022 923b1523 Iustin Pop
  results = []
3023 923b1523 Iustin Pop
  for val in exts:
3024 b9bddb6b Iustin Pop
    new_id = lu.cfg.GenerateUniqueID()
3025 923b1523 Iustin Pop
    results.append("%s%s" % (new_id, val))
3026 923b1523 Iustin Pop
  return results
3027 923b1523 Iustin Pop
3028 923b1523 Iustin Pop
3029 b9bddb6b Iustin Pop
def _GenerateDRBD8Branch(lu, primary, secondary, size, names, iv_name,
                         p_minor, s_minor):
  """Generate a drbd8 device complete with its children.

  Builds the two backing LVs (data plus a 128MB metadata volume) and
  wraps them in a DRBD8 disk whose logical id ties together both nodes,
  a freshly allocated network port, the two minors and a shared secret.

  """
  port = lu.cfg.AllocatePort()
  vgname = lu.cfg.GetVGName()
  shared_secret = lu.cfg.GenerateDRBDSecret()
  data_dev = objects.Disk(dev_type=constants.LD_LV, size=size,
                          logical_id=(vgname, names[0]))
  meta_dev = objects.Disk(dev_type=constants.LD_LV, size=128,
                          logical_id=(vgname, names[1]))
  return objects.Disk(dev_type=constants.LD_DRBD8, size=size,
                      logical_id=(primary, secondary, port,
                                  p_minor, s_minor,
                                  shared_secret),
                      children=[data_dev, meta_dev],
                      iv_name=iv_name)
def _GenerateDiskTemplate(lu, template_name,
                          instance_name, primary_node,
                          secondary_nodes, disk_sz, swap_sz,
                          file_storage_dir, file_driver):
  """Generate the entire disk layout for a given template type.

  Builds the (sda, sdb) disk objects for the instance according to
  'template_name':
    - diskless: no disks
    - plain: two LVs on the primary node only
    - drbd8: two DRBD8 devices mirrored to the single secondary node
    - file: two file-backed disks under 'file_storage_dir'

  Raises ProgrammerError if the number of secondary nodes does not
  match the template, or if the template is unknown.

  """
  #TODO: compute space requirements

  vgname = lu.cfg.GetVGName()
  if template_name == constants.DT_DISKLESS:
    disks = []
  elif template_name == constants.DT_PLAIN:
    # plain LVs live on the primary node only
    if len(secondary_nodes) != 0:
      raise errors.ProgrammerError("Wrong template configuration")

    names = _GenerateUniqueNames(lu, [".sda", ".sdb"])
    sda_dev = objects.Disk(dev_type=constants.LD_LV, size=disk_sz,
                           logical_id=(vgname, names[0]),
                           iv_name = "sda")
    sdb_dev = objects.Disk(dev_type=constants.LD_LV, size=swap_sz,
                           logical_id=(vgname, names[1]),
                           iv_name = "sdb")
    disks = [sda_dev, sdb_dev]
  elif template_name == constants.DT_DRBD8:
    # drbd8 mirrors to exactly one secondary node
    if len(secondary_nodes) != 1:
      raise errors.ProgrammerError("Wrong template configuration")
    remote_node = secondary_nodes[0]
    # four minors: sda/sdb on the primary, sda/sdb on the secondary
    (minor_pa, minor_pb,
     minor_sa, minor_sb) = lu.cfg.AllocateDRBDMinor(
      [primary_node, primary_node, remote_node, remote_node], instance_name)

    names = _GenerateUniqueNames(lu, [".sda_data", ".sda_meta",
                                      ".sdb_data", ".sdb_meta"])
    drbd_sda_dev = _GenerateDRBD8Branch(lu, primary_node, remote_node,
                                        disk_sz, names[0:2], "sda",
                                        minor_pa, minor_sa)
    drbd_sdb_dev = _GenerateDRBD8Branch(lu, primary_node, remote_node,
                                        swap_sz, names[2:4], "sdb",
                                        minor_pb, minor_sb)
    disks = [drbd_sda_dev, drbd_sdb_dev]
  elif template_name == constants.DT_FILE:
    # file-backed disks live on the primary node only
    if len(secondary_nodes) != 0:
      raise errors.ProgrammerError("Wrong template configuration")

    file_sda_dev = objects.Disk(dev_type=constants.LD_FILE, size=disk_sz,
                                iv_name="sda", logical_id=(file_driver,
                                "%s/sda" % file_storage_dir))
    file_sdb_dev = objects.Disk(dev_type=constants.LD_FILE, size=swap_sz,
                                iv_name="sdb", logical_id=(file_driver,
                                "%s/sdb" % file_storage_dir))
    disks = [file_sda_dev, file_sdb_dev]
  else:
    raise errors.ProgrammerError("Invalid disk template '%s'" % template_name)
  return disks
def _GetInstanceInfoText(instance):
3108 3ecf6786 Iustin Pop
  """Compute that text that should be added to the disk's metadata.
3109 3ecf6786 Iustin Pop

3110 3ecf6786 Iustin Pop
  """
3111 a0c3fea1 Michael Hanselmann
  return "originstname+%s" % instance.name
3112 a0c3fea1 Michael Hanselmann
3113 a0c3fea1 Michael Hanselmann
3114 b9bddb6b Iustin Pop
def _CreateDisks(lu, instance):
  """Create all disks for an instance.

  This abstracts away some work from AddInstance.

  For file-based templates the storage directory is created first; then
  each disk tree is created on the secondary nodes before the primary.
  The function stops at the first failure.

  Args:
    instance: the instance object

  Returns:
    True or False showing the success of the creation process

  """
  info = _GetInstanceInfoText(instance)

  if instance.disk_template == constants.DT_FILE:
    # all file disks share one directory; derive it from the first disk
    file_storage_dir = os.path.dirname(instance.disks[0].logical_id[1])
    result = lu.rpc.call_file_storage_dir_create(instance.primary_node,
                                                 file_storage_dir)

    # a false-ish result means the RPC itself failed (node unreachable)
    if not result:
      logging.error("Could not connect to node '%s'", instance.primary_node)
      return False

    # result[0] is the per-call success flag from the node
    if not result[0]:
      logging.error("Failed to create directory '%s'", file_storage_dir)
      return False

  for device in instance.disks:
    logging.info("Creating volume %s for instance %s",
                 device.iv_name, instance.name)
    #HARDCODE
    # secondaries are created first so the primary can attach to them
    for secondary_node in instance.secondary_nodes:
      if not _CreateBlockDevOnSecondary(lu, secondary_node, instance,
                                        device, False, info):
        logging.error("Failed to create volume %s (%s) on secondary node %s!",
                      device.iv_name, device, secondary_node)
        return False
    #HARDCODE
    if not _CreateBlockDevOnPrimary(lu, instance.primary_node,
                                    instance, device, info):
      logging.error("Failed to create volume %s on primary!", device.iv_name)
      return False

  return True
def _RemoveDisks(lu, instance):
  """Remove all disks for an instance.

  This abstracts away some work from `AddInstance()` and
  `RemoveInstance()`.  Removal is best-effort: a device that cannot be
  removed only produces a warning and the loop carries on with the
  remaining ones (compare with `_CreateDisks()`).

  Args:
    instance: the instance object

  Returns:
    True or False showing the success of the removal proces

  """
  logging.info("Removing block devices for instance %s", instance.name)

  all_ok = True
  for device in instance.disks:
    for node, disk in device.ComputeNodeTree(instance.primary_node):
      lu.cfg.SetDiskID(disk, node)
      if lu.rpc.call_blockdev_remove(node, disk):
        continue
      lu.proc.LogWarning("Could not remove block device %s on node %s,"
                         " continuing anyway", device.iv_name, node)
      all_ok = False

  if instance.disk_template == constants.DT_FILE:
    # also remove the shared file-storage directory
    file_storage_dir = os.path.dirname(instance.disks[0].logical_id[1])
    if not lu.rpc.call_file_storage_dir_remove(instance.primary_node,
                                               file_storage_dir):
      logging.error("Could not remove directory '%s'", file_storage_dir)
      all_ok = False

  return all_ok
def _ComputeDiskSize(disk_template, disk_size, swap_size):
  """Compute disk size requirements in the volume group

  This is currently hard-coded for the two-drive layout.

  """
  # Space needed in the volume group per disk template; None means the
  # template does not consume LVM space at all
  # 256 MB are added for drbd metadata, 128MB for each drbd device
  template_requirements = {
    constants.DT_DISKLESS: None,
    constants.DT_PLAIN: disk_size + swap_size,
    constants.DT_DRBD8: disk_size + swap_size + 256,
    constants.DT_FILE: None,
  }

  try:
    return template_requirements[disk_template]
  except KeyError:
    raise errors.ProgrammerError("Disk template '%s' size requirement"
                                 " is unknown" %  disk_template)
3218 74409b12 Iustin Pop
def _CheckHVParams(lu, nodenames, hvname, hvparams):
  """Hypervisor parameter validation.

  This function abstract the hypervisor parameter validation to be
  used in both instance create and instance modify.

  @type lu: L{LogicalUnit}
  @param lu: the logical unit for which we check
  @type nodenames: list
  @param nodenames: the list of nodes on which we should check
  @type hvname: string
  @param hvname: the name of the hypervisor we should use
  @type hvparams: dict
  @param hvparams: the parameters which we need to check
  @raise errors.OpPrereqError: if the parameters are not valid

  """
  node_results = lu.rpc.call_hypervisor_validate_params(nodenames, hvname,
                                                        hvparams)
  for node in nodenames:
    answer = node_results.get(node, None)
    # a missing or non-sequence answer means we could not query the node
    if not (answer and isinstance(answer, (tuple, list))):
      raise errors.OpPrereqError("Cannot get current information"
                                 " from node '%s' (%s)" % (node, answer))
    success = answer[0]
    if not success:
      raise errors.OpPrereqError("Hypervisor parameter validation failed:"
                                 " %s" % answer[1])
3248 a8083063 Iustin Pop
class LUCreateInstance(LogicalUnit):
  """Create an instance.

  Handles both fresh creation (INSTANCE_CREATE) and import from an
  export directory (INSTANCE_IMPORT): parameter/lock expansion, all
  prerequisite checks, optional iallocator-based node selection, and
  the actual disk/config/OS creation in Exec().

  """
  HPATH = "instance-add"
  HTYPE = constants.HTYPE_INSTANCE
  # opcode attributes that must be present before this LU may run
  _OP_REQP = ["instance_name", "disk_size",
              "disk_template", "swap_size", "mode", "start",
              "wait_for_sync", "ip_check", "mac",
              "hvparams", "beparams"]
  REQ_BGL = False

  def _ExpandNode(self, node):
    """Expands and checks one node name.

    @param node: short node name to expand
    @return: the fully-qualified node name
    @raise errors.OpPrereqError: if the node is not known to the config

    """
    node_full = self.cfg.ExpandNodeName(node)
    if node_full is None:
      raise errors.OpPrereqError("Unknown node %s" % node)
    return node_full

  def ExpandNames(self):
    """ExpandNames for CreateInstance.

    Figure out the right locks for instance creation.

    """
    self.needed_locks = {}

    # set optional parameters to none if they don't exist
    for attr in ["pnode", "snode", "iallocator", "hypervisor"]:
      if not hasattr(self.op, attr):
        setattr(self.op, attr, None)

    # cheap checks, mostly valid constants given

    # verify creation mode
    if self.op.mode not in (constants.INSTANCE_CREATE,
                            constants.INSTANCE_IMPORT):
      raise errors.OpPrereqError("Invalid instance creation mode '%s'" %
                                 self.op.mode)

    # disk template and mirror node verification
    if self.op.disk_template not in constants.DISK_TEMPLATES:
      raise errors.OpPrereqError("Invalid disk template name")

    # default to the cluster-wide hypervisor when none was requested
    if self.op.hypervisor is None:
      self.op.hypervisor = self.cfg.GetHypervisorType()

    cluster = self.cfg.GetClusterInfo()
    enabled_hvs = cluster.enabled_hypervisors
    if self.op.hypervisor not in enabled_hvs:
      raise errors.OpPrereqError("Selected hypervisor (%s) not enabled in the"
                                 " cluster (%s)" % (self.op.hypervisor,
                                  ",".join(enabled_hvs)))

    # check hypervisor parameter syntax (locally)

    filled_hvp = cluster.FillDict(cluster.hvparams[self.op.hypervisor],
                                  self.op.hvparams)
    hv_type = hypervisor.GetHypervisor(self.op.hypervisor)
    hv_type.CheckParameterSyntax(filled_hvp)

    # fill and remember the beparams dict (cluster defaults overridden by
    # the opcode-supplied values); used later for memory/vcpu checks
    self.be_full = cluster.FillDict(cluster.beparams[constants.BEGR_DEFAULT],
                                    self.op.beparams)

    #### instance parameters check

    # instance name verification
    hostname1 = utils.HostInfo(self.op.instance_name)
    self.op.instance_name = instance_name = hostname1.name

    # this is just a preventive check, but someone might still add this
    # instance in the meantime, and creation will fail at lock-add time
    if instance_name in self.cfg.GetInstanceList():
      raise errors.OpPrereqError("Instance '%s' is already in the cluster" %
                                 instance_name)

    self.add_locks[locking.LEVEL_INSTANCE] = instance_name

    # ip validity checks: None/"none" disables the ip, "auto" resolves it
    # from the instance name, anything else must be a literal valid IP
    ip = getattr(self.op, "ip", None)
    if ip is None or ip.lower() == "none":
      inst_ip = None
    elif ip.lower() == constants.VALUE_AUTO:
      inst_ip = hostname1.ip
    else:
      if not utils.IsValidIP(ip):
        raise errors.OpPrereqError("given IP address '%s' doesn't look"
                                   " like a valid IP" % ip)
      inst_ip = ip
    self.inst_ip = self.op.ip = inst_ip
    # used in CheckPrereq for ip ping check
    self.check_ip = hostname1.ip

    # MAC address verification (auto/generate are filled in at Exec time)
    if self.op.mac not in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
      if not utils.IsValidMac(self.op.mac.lower()):
        raise errors.OpPrereqError("invalid MAC address specified: %s" %
                                   self.op.mac)

    # file storage checks
    if (self.op.file_driver and
        not self.op.file_driver in constants.FILE_DRIVER):
      raise errors.OpPrereqError("Invalid file driver name '%s'" %
                                 self.op.file_driver)

    if self.op.file_storage_dir and os.path.isabs(self.op.file_storage_dir):
      raise errors.OpPrereqError("File storage directory path not absolute")

    ### Node/iallocator related checks
    # exactly one of iallocator/pnode must be given
    if [self.op.iallocator, self.op.pnode].count(None) != 1:
      raise errors.OpPrereqError("One and only one of iallocator and primary"
                                 " node must be given")

    if self.op.iallocator:
      # the allocator may pick any node, so we must lock them all
      self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
    else:
      self.op.pnode = self._ExpandNode(self.op.pnode)
      nodelist = [self.op.pnode]
      if self.op.snode is not None:
        self.op.snode = self._ExpandNode(self.op.snode)
        nodelist.append(self.op.snode)
      self.needed_locks[locking.LEVEL_NODE] = nodelist

    # in case of import lock the source node too
    if self.op.mode == constants.INSTANCE_IMPORT:
      src_node = getattr(self.op, "src_node", None)
      src_path = getattr(self.op, "src_path", None)

      if src_node is None or src_path is None:
        raise errors.OpPrereqError("Importing an instance requires source"
                                   " node and path options")

      if not os.path.isabs(src_path):
        raise errors.OpPrereqError("The source path must be absolute")

      self.op.src_node = src_node = self._ExpandNode(src_node)
      if self.needed_locks[locking.LEVEL_NODE] is not locking.ALL_SET:
        self.needed_locks[locking.LEVEL_NODE].append(src_node)

    else: # INSTANCE_CREATE
      if getattr(self.op, "os_type", None) is None:
        raise errors.OpPrereqError("No guest OS specified")

  def _RunAllocator(self):
    """Run the allocator based on input opcode.

    On success this sets self.op.pnode (and self.op.snode when two
    nodes are required) from the allocator's answer.

    @raise errors.OpPrereqError: if the allocator fails or returns an
        unexpected number of nodes

    """
    disks = [{"size": self.op.disk_size, "mode": "w"},
             {"size": self.op.swap_size, "mode": "w"}]
    nics = [{"mac": self.op.mac, "ip": getattr(self.op, "ip", None),
             "bridge": self.op.bridge}]
    ial = IAllocator(self,
                     mode=constants.IALLOCATOR_MODE_ALLOC,
                     name=self.op.instance_name,
                     disk_template=self.op.disk_template,
                     tags=[],
                     os=self.op.os_type,
                     vcpus=self.be_full[constants.BE_VCPUS],
                     mem_size=self.be_full[constants.BE_MEMORY],
                     disks=disks,
                     nics=nics,
                     )

    ial.Run(self.op.iallocator)

    if not ial.success:
      raise errors.OpPrereqError("Can't compute nodes using"
                                 " iallocator '%s': %s" % (self.op.iallocator,
                                                           ial.info))
    if len(ial.nodes) != ial.required_nodes:
      raise errors.OpPrereqError("iallocator '%s' returned invalid number"
                                 " of nodes (%s), required %s" %
                                 (self.op.iallocator, len(ial.nodes),
                                  ial.required_nodes))
    self.op.pnode = ial.nodes[0]
    self.LogInfo("Selected nodes for instance %s via iallocator %s: %s",
                 self.op.instance_name, self.op.iallocator,
                 ", ".join(ial.nodes))
    if ial.required_nodes == 2:
      self.op.snode = ial.nodes[1]

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and secondary nodes of the instance.

    """
    env = {
      "INSTANCE_DISK_TEMPLATE": self.op.disk_template,
      "INSTANCE_DISK_SIZE": self.op.disk_size,
      "INSTANCE_SWAP_SIZE": self.op.swap_size,
      "INSTANCE_ADD_MODE": self.op.mode,
      }
    if self.op.mode == constants.INSTANCE_IMPORT:
      env["INSTANCE_SRC_NODE"] = self.op.src_node
      env["INSTANCE_SRC_PATH"] = self.op.src_path
      env["INSTANCE_SRC_IMAGES"] = self.src_images

    env.update(_BuildInstanceHookEnv(name=self.op.instance_name,
      primary_node=self.op.pnode,
      secondary_nodes=self.secondaries,
      status=self.instance_status,
      os_type=self.op.os_type,
      memory=self.be_full[constants.BE_MEMORY],
      vcpus=self.be_full[constants.BE_VCPUS],
      nics=[(self.inst_ip, self.op.bridge, self.op.mac)],
    ))

    # hooks run on the master, the primary and any secondaries
    nl = ([self.cfg.GetMasterNode(), self.op.pnode] +
          self.secondaries)
    return env, nl, nl

  def CheckPrereq(self):
    """Check prerequisites.

    Validates export data (for imports), IP conflicts, node placement
    (possibly via the iallocator), free disk space and memory,
    hypervisor parameters, OS availability and bridge existence.

    """
    if (not self.cfg.GetVGName() and
        self.op.disk_template not in constants.DTS_NOT_LVM):
      raise errors.OpPrereqError("Cluster does not support lvm-based"
                                 " instances")

    if self.op.mode == constants.INSTANCE_IMPORT:
      src_node = self.op.src_node
      src_path = self.op.src_path

      export_info = self.rpc.call_export_info(src_node, src_path)

      if not export_info:
        raise errors.OpPrereqError("No export found in dir %s" % src_path)

      if not export_info.has_section(constants.INISECT_EXP):
        raise errors.ProgrammerError("Corrupted export config")

      ei_version = export_info.get(constants.INISECT_EXP, 'version')
      if (int(ei_version) != constants.EXPORT_VERSION):
        raise errors.OpPrereqError("Wrong export version %s (wanted %d)" %
                                   (ei_version, constants.EXPORT_VERSION))

      # Check that the new instance doesn't have less disks than the export
      # TODO: substitute "2" with the actual number of disks requested
      instance_disks = 2
      export_disks = export_info.getint(constants.INISECT_INS, 'disk_count')
      if instance_disks < export_disks:
        raise errors.OpPrereqError("Not enough disks to import."
                                   " (instance: %d, export: %d)" %
                                   (2, export_disks))

      self.op.os_type = export_info.get(constants.INISECT_EXP, 'os')
      # collect the dump image path for each exported disk; False marks
      # disks that have no dump in the export
      disk_images = []
      for idx in range(export_disks):
        option = 'disk%d_dump' % idx
        if export_info.has_option(constants.INISECT_INS, option):
          # FIXME: are the old os-es, disk sizes, etc. useful?
          export_name = export_info.get(constants.INISECT_INS, option)
          image = os.path.join(src_path, export_name)
          disk_images.append(image)
        else:
          disk_images.append(False)

      self.src_images = disk_images

      # when re-importing under the same name, reuse the exported MAC
      if self.op.mac == constants.VALUE_AUTO:
        old_name = export_info.get(constants.INISECT_INS, 'name')
        if self.op.instance_name == old_name:
          # FIXME: adjust every nic, when we'll be able to create instances
          # with more than one
          if int(export_info.get(constants.INISECT_INS, 'nic_count')) >= 1:
            self.op.mac = export_info.get(constants.INISECT_INS, 'nic_0_mac')

    # ip ping checks (we use the same ip that was resolved in ExpandNames)

    if self.op.start and not self.op.ip_check:
      raise errors.OpPrereqError("Cannot ignore IP address conflicts when"
                                 " adding an instance in start mode")

    if self.op.ip_check:
      # a reachable IP means another host already uses this address
      if utils.TcpPing(self.check_ip, constants.DEFAULT_NODED_PORT):
        raise errors.OpPrereqError("IP %s of instance %s already in use" %
                                   (self.check_ip, self.op.instance_name))

    # bridge verification
    bridge = getattr(self.op, "bridge", None)
    if bridge is None:
      self.op.bridge = self.cfg.GetDefBridge()
    else:
      self.op.bridge = bridge

    #### allocator run

    if self.op.iallocator is not None:
      self._RunAllocator()

    #### node related checks

    # check primary node
    self.pnode = pnode = self.cfg.GetNodeInfo(self.op.pnode)
    assert self.pnode is not None, \
      "Cannot retrieve locked node %s" % self.op.pnode
    self.secondaries = []

    # mirror node verification
    if self.op.disk_template in constants.DTS_NET_MIRROR:
      if self.op.snode is None:
        raise errors.OpPrereqError("The networked disk templates need"
                                   " a mirror node")
      if self.op.snode == pnode.name:
        raise errors.OpPrereqError("The secondary node cannot be"
                                   " the primary node.")
      self.secondaries.append(self.op.snode)

    nodenames = [pnode.name] + self.secondaries

    req_size = _ComputeDiskSize(self.op.disk_template,
                                self.op.disk_size, self.op.swap_size)

    # Check lv size requirements (req_size is None for templates that
    # don't use the volume group)
    if req_size is not None:
      nodeinfo = self.rpc.call_node_info(nodenames, self.cfg.GetVGName(),
                                         self.op.hypervisor)
      for node in nodenames:
        info = nodeinfo.get(node, None)
        if not info:
          raise errors.OpPrereqError("Cannot get current information"
                                     " from node '%s'" % node)
        vg_free = info.get('vg_free', None)
        if not isinstance(vg_free, int):
          raise errors.OpPrereqError("Can't compute free disk space on"
                                     " node %s" % node)
        if req_size > info['vg_free']:
          raise errors.OpPrereqError("Not enough disk space on target node %s."
                                     " %d MB available, %d MB required" %
                                     (node, info['vg_free'], req_size))

    # validate the hypervisor parameters on all involved nodes
    _CheckHVParams(self, nodenames, self.op.hypervisor, self.op.hvparams)

    # os verification
    os_obj = self.rpc.call_os_get(pnode.name, self.op.os_type)
    if not os_obj:
      raise errors.OpPrereqError("OS '%s' not in supported os list for"
                                 " primary node"  % self.op.os_type)

    # bridge check on primary node
    if not self.rpc.call_bridges_exist(self.pnode.name, [self.op.bridge]):
      raise errors.OpPrereqError("target bridge '%s' does not exist on"
                                 " destination node '%s'" %
                                 (self.op.bridge, pnode.name))

    # memory check on primary node
    if self.op.start:
      _CheckNodeFreeMemory(self, self.pnode.name,
                           "creating instance %s" % self.op.instance_name,
                           self.be_full[constants.BE_MEMORY],
                           self.op.hypervisor)

    if self.op.start:
      self.instance_status = 'up'
    else:
      self.instance_status = 'down'

  def Exec(self, feedback_fn):
    """Create and add the instance to the cluster.

    Creates the disks, registers the instance in the configuration,
    waits for disk sync, runs the OS create/import scripts and
    optionally starts the instance; on disk failure the partial
    creation is rolled back.

    """
    instance = self.op.instance_name
    pnode_name = self.pnode.name

    if self.op.mac in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
      mac_address = self.cfg.GenerateMAC()
    else:
      mac_address = self.op.mac

    nic = objects.NIC(bridge=self.op.bridge, mac=mac_address)
    if self.inst_ip is not None:
      nic.ip = self.inst_ip

    # only some hypervisors need a network (e.g. VNC) port allocated
    ht_kind = self.op.hypervisor
    if ht_kind in constants.HTS_REQ_PORT:
      network_port = self.cfg.AllocatePort()
    else:
      network_port = None

    ##if self.op.vnc_bind_address is None:
    ##  self.op.vnc_bind_address = constants.VNC_DEFAULT_BIND_ADDRESS

    # this is needed because os.path.join does not accept None arguments
    if self.op.file_storage_dir is None:
      string_file_storage_dir = ""
    else:
      string_file_storage_dir = self.op.file_storage_dir

    # build the full file storage dir path
    file_storage_dir = os.path.normpath(os.path.join(
                                        self.cfg.GetFileStorageDir(),
                                        string_file_storage_dir, instance))


    disks = _GenerateDiskTemplate(self,
                                  self.op.disk_template,
                                  instance, pnode_name,
                                  self.secondaries, self.op.disk_size,
                                  self.op.swap_size,
                                  file_storage_dir,
                                  self.op.file_driver)

    iobj = objects.Instance(name=instance, os=self.op.os_type,
                            primary_node=pnode_name,
                            nics=[nic], disks=disks,
                            disk_template=self.op.disk_template,
                            status=self.instance_status,
                            network_port=network_port,
                            beparams=self.op.beparams,
                            hvparams=self.op.hvparams,
                            hypervisor=self.op.hypervisor,
                            )

    feedback_fn("* creating instance disks...")
    if not _CreateDisks(self, iobj):
      # roll back any disks that were created before the failure
      _RemoveDisks(self, iobj)
      self.cfg.ReleaseDRBDMinors(instance)
      raise errors.OpExecError("Device creation failed, reverting...")

    feedback_fn("adding instance %s to cluster config" % instance)

    self.cfg.AddInstance(iobj)
    # Declare that we don't want to remove the instance lock anymore, as we've
    # added the instance to the config
    del self.remove_locks[locking.LEVEL_INSTANCE]
    # Remove the temp. assignements for the instance's drbds
    self.cfg.ReleaseDRBDMinors(instance)

    if self.op.wait_for_sync:
      disk_abort = not _WaitForSync(self, iobj)
    elif iobj.disk_template in constants.DTS_NET_MIRROR:
      # make sure the disks are not degraded (still sync-ing is ok)
      time.sleep(15)
      feedback_fn("* checking mirrors status")
      disk_abort = not _WaitForSync(self, iobj, oneshot=True)
    else:
      disk_abort = False

    if disk_abort:
      # degraded disks: undo both the disk creation and the config entry
      _RemoveDisks(self, iobj)
      self.cfg.RemoveInstance(iobj.name)
      # Make sure the instance lock gets removed
      self.remove_locks[locking.LEVEL_INSTANCE] = iobj.name
      raise errors.OpExecError("There are some degraded disks for"
                               " this instance")

    feedback_fn("creating os for instance %s on node %s" %
                (instance, pnode_name))

    if iobj.disk_template != constants.DT_DISKLESS:
      if self.op.mode == constants.INSTANCE_CREATE:
        feedback_fn("* running the instance OS create scripts...")
        if not self.rpc.call_instance_os_add(pnode_name, iobj):
          raise errors.OpExecError("could not add os for instance %s"
                                   " on node %s" %
                                   (instance, pnode_name))

      elif self.op.mode == constants.INSTANCE_IMPORT:
        feedback_fn("* running the instance OS import scripts...")
        src_node = self.op.src_node
        src_images = self.src_images
        cluster_name = self.cfg.GetClusterName()
        import_result = self.rpc.call_instance_os_import(pnode_name, iobj,
                                                         src_node, src_images,
                                                         cluster_name)
        for idx, result in enumerate(import_result):
          if not result:
            self.LogWarning("Could not image %s for on instance %s, disk %d,"
                            " on node %s" % (src_images[idx], instance, idx,
                                             pnode_name))
      else:
        # also checked in the prereq part
        raise errors.ProgrammerError("Unknown OS initialization mode '%s'"
                                     % self.op.mode)

    if self.op.start:
      logging.info("Starting instance %s on node %s", instance, pnode_name)
      feedback_fn("* starting instance...")
      if not self.rpc.call_instance_start(pnode_name, iobj, None):
        raise errors.OpExecError("Could not start instance")
class LUConnectConsole(NoHooksLU):
  """Connect to an instance's console.

  This is somewhat special in that it returns the command line that
  you need to run on the master node in order to connect to the
  console.

  """
  _OP_REQP = ["instance_name"]
  REQ_BGL = False

  def ExpandNames(self):
    self._ExpandAndLockInstance()

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster.

    """
    self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
    assert self.instance is not None, \
      "Cannot retrieve locked instance %s" % self.op.instance_name

  def Exec(self, feedback_fn):
    """Connect to the console of an instance

    """
    inst = self.instance
    pnode = inst.primary_node

    # query only the primary node, restricted to this instance's hypervisor
    running = self.rpc.call_instance_list([pnode],
                                          [inst.hypervisor])[pnode]
    if running is False:
      raise errors.OpExecError("Can't connect to node %s." % pnode)

    if inst.name not in running:
      raise errors.OpExecError("Instance %s is not running." % inst.name)

    logging.debug("Connecting to console of %s on %s", inst.name, pnode)

    hv = hypervisor.GetHypervisor(inst.hypervisor)
    console_cmd = hv.GetShellCommandForConsole(inst)

    # build ssh cmdline
    return self.ssh.BuildCmd(pnode, "root", console_cmd, batch=True, tty=True)
3783 a8083063 Iustin Pop
3784 a8083063 Iustin Pop
3785 a8083063 Iustin Pop
class LUReplaceDisks(LogicalUnit):
3786 a8083063 Iustin Pop
  """Replace the disks of an instance.
3787 a8083063 Iustin Pop

3788 a8083063 Iustin Pop
  """
3789 a8083063 Iustin Pop
  HPATH = "mirrors-replace"
3790 a8083063 Iustin Pop
  HTYPE = constants.HTYPE_INSTANCE
3791 a9e0c397 Iustin Pop
  _OP_REQP = ["instance_name", "mode", "disks"]
3792 efd990e4 Guido Trotter
  REQ_BGL = False
3793 efd990e4 Guido Trotter
3794 efd990e4 Guido Trotter
  def ExpandNames(self):
    """Lock the instance and compute the node-level lock set."""
    self._ExpandAndLockInstance()

    if not hasattr(self.op, "remote_node"):
      self.op.remote_node = None

    use_iallocator = getattr(self.op, "iallocator", None) is not None
    if use_iallocator:
      if self.op.remote_node is not None:
        raise errors.OpPrereqError("Give either the iallocator or the new"
                                   " secondary, not both")
      # the allocator may pick any node, so we must lock them all
      self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
    elif self.op.remote_node is not None:
      full_node_name = self.cfg.ExpandNodeName(self.op.remote_node)
      if full_node_name is None:
        raise errors.OpPrereqError("Node '%s' not known" %
                                   self.op.remote_node)
      self.op.remote_node = full_node_name
      self.needed_locks[locking.LEVEL_NODE] = [full_node_name]
      # the instance's own nodes get appended later in DeclareLocks
      self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_APPEND
    else:
      # no new secondary: only the instance's current nodes are needed
      self.needed_locks[locking.LEVEL_NODE] = []
      self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
3817 efd990e4 Guido Trotter
3818 efd990e4 Guido Trotter
  def DeclareLocks(self, level):
    """Declare the instance's own nodes at the node locking level.

    When the whole node set is already locked (the iallocator case)
    there is nothing left to declare.

    """
    if level != locking.LEVEL_NODE:
      return
    if self.needed_locks[locking.LEVEL_NODE] is not locking.ALL_SET:
      self._LockInstancesNodes()
3824 a8083063 Iustin Pop
3825 b6e82a65 Iustin Pop
  def _RunAllocator(self):
    """Compute a new secondary node using an IAllocator.

    On success the chosen node name is stored in self.op.remote_node;
    any allocator failure is reported as an OpPrereqError.

    """
    ial = IAllocator(self,
                     mode=constants.IALLOCATOR_MODE_RELOC,
                     name=self.op.instance_name,
                     relocate_from=[self.sec_node])

    ial.Run(self.op.iallocator)

    if not ial.success:
      raise errors.OpPrereqError("Can't compute nodes using"
                                 " iallocator '%s': %s" % (self.op.iallocator,
                                                           ial.info))
    if len(ial.nodes) != ial.required_nodes:
      # BUGFIX: the format string has three conversions but the original
      # code supplied only two arguments, so raising this error crashed
      # with a TypeError instead of reporting the allocator problem
      raise errors.OpPrereqError("iallocator '%s' returned invalid number"
                                 " of nodes (%s), required %s" %
                                 (self.op.iallocator, len(ial.nodes),
                                  ial.required_nodes))
    self.op.remote_node = ial.nodes[0]
    self.LogInfo("Selected new secondary for the instance: %s",
                 self.op.remote_node)
  def BuildHooksEnv(self):
3849 a8083063 Iustin Pop
    """Build hooks env.
3850 a8083063 Iustin Pop

3851 a8083063 Iustin Pop
    This runs on the master, the primary and all the secondaries.
3852 a8083063 Iustin Pop

3853 a8083063 Iustin Pop
    """
3854 a8083063 Iustin Pop
    env = {
3855 a9e0c397 Iustin Pop
      "MODE": self.op.mode,
3856 a8083063 Iustin Pop
      "NEW_SECONDARY": self.op.remote_node,
3857 a8083063 Iustin Pop
      "OLD_SECONDARY": self.instance.secondary_nodes[0],
3858 a8083063 Iustin Pop
      }
3859 338e51e8 Iustin Pop
    env.update(_BuildInstanceHookEnvByObject(self, self.instance))
3860 0834c866 Iustin Pop
    nl = [
3861 d6a02168 Michael Hanselmann
      self.cfg.GetMasterNode(),
3862 0834c866 Iustin Pop
      self.instance.primary_node,
3863 0834c866 Iustin Pop
      ]
3864 0834c866 Iustin Pop
    if self.op.remote_node is not None:
3865 0834c866 Iustin Pop
      nl.append(self.op.remote_node)
3866 a8083063 Iustin Pop
    return env, nl, nl
3867 a8083063 Iustin Pop
3868 a8083063 Iustin Pop
  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster, that its disk
    layout supports replacement, and it normalizes the replacement
    mode; it also computes self.sec_node, self.remote_node_info and
    (for DRBD8) self.tgt_node/self.oth_node/self.new_node for Exec.

    """
    instance = self.cfg.GetInstanceInfo(self.op.instance_name)
    assert instance is not None, \
      "Cannot retrieve locked instance %s" % self.op.instance_name
    self.instance = instance

    # only network-mirrored disk templates support disk replacement
    if instance.disk_template not in constants.DTS_NET_MIRROR:
      raise errors.OpPrereqError("Instance's disk layout is not"
                                 " network mirrored.")

    if len(instance.secondary_nodes) != 1:
      raise errors.OpPrereqError("The instance has a strange layout,"
                                 " expected one secondary but found %d" %
                                 len(instance.secondary_nodes))

    self.sec_node = instance.secondary_nodes[0]

    # if an allocator was requested, it fills in self.op.remote_node
    ia_name = getattr(self.op, "iallocator", None)
    if ia_name is not None:
      self._RunAllocator()

    remote_node = self.op.remote_node
    if remote_node is not None:
      self.remote_node_info = self.cfg.GetNodeInfo(remote_node)
      assert self.remote_node_info is not None, \
        "Cannot retrieve locked node %s" % remote_node
    else:
      self.remote_node_info = None
    if remote_node == instance.primary_node:
      raise errors.OpPrereqError("The specified node is the primary node of"
                                 " the instance.")
    elif remote_node == self.sec_node:
      if self.op.mode == constants.REPLACE_DISK_SEC:
        # this is for DRBD8, where we can't execute the same mode of
        # replacement as for drbd7 (no different port allocated)
        raise errors.OpPrereqError("Same secondary given, cannot execute"
                                   " replacement")
    if instance.disk_template == constants.DT_DRBD8:
      if (self.op.mode == constants.REPLACE_DISK_ALL and
          remote_node is not None):
        # switch to replace secondary mode
        self.op.mode = constants.REPLACE_DISK_SEC

      if self.op.mode == constants.REPLACE_DISK_ALL:
        raise errors.OpPrereqError("Template 'drbd' only allows primary or"
                                   " secondary disk replacement, not"
                                   " both at once")
      elif self.op.mode == constants.REPLACE_DISK_PRI:
        if remote_node is not None:
          raise errors.OpPrereqError("Template 'drbd' does not allow changing"
                                     " the secondary while doing a primary"
                                     " node disk replacement")
        # tgt_node is where new LVs will be created, oth_node is only
        # checked for consistency (see _ExecD8DiskOnly)
        self.tgt_node = instance.primary_node
        self.oth_node = instance.secondary_nodes[0]
      elif self.op.mode == constants.REPLACE_DISK_SEC:
        self.new_node = remote_node # this can be None, in which case
                                    # we don't change the secondary
        self.tgt_node = instance.secondary_nodes[0]
        self.oth_node = instance.primary_node
      else:
        raise errors.ProgrammerError("Unhandled disk replace mode")

    # finally, validate that every requested disk name exists
    for name in self.op.disks:
      if instance.FindDisk(name) is None:
        raise errors.OpPrereqError("Disk '%s' not found for instance '%s'" %
                                   (name, instance.name))
3939 a8083063 Iustin Pop
3940 a9e0c397 Iustin Pop
  def _ExecD8DiskOnly(self, feedback_fn):
    """Replace a disk on the primary or secondary for drbd8.

    The algorithm for replace is quite complicated:
      - for each disk to be replaced:
        - create new LVs on the target node with unique names
        - detach old LVs from the drbd device
        - rename old LVs to name_replaced.<time_t>
        - rename new LVs to old LVs
        - attach the new LVs (with the old names now) to the drbd device
      - wait for sync across all devices
      - for each modified disk:
        - remove old LVs (which have the name name_replaced.<time_t>)

    Failures are not very well handled.

    """
    steps_total = 6
    # local aliases for the job-log helpers
    warning, info = (self.proc.LogWarning, self.proc.LogInfo)
    instance = self.instance
    # maps disk iv_name -> (drbd device, old LV children, new LV children)
    iv_names = {}
    vgname = self.cfg.GetVGName()
    # start of work
    cfg = self.cfg
    # tgt_node: where storage is replaced; oth_node: the other mirror side
    tgt_node = self.tgt_node
    oth_node = self.oth_node

    # Step: check device activation
    self.proc.LogStep(1, steps_total, "check device existence")
    info("checking volume groups")
    my_vg = cfg.GetVGName()
    # rpc results are treated as failed when falsy
    results = self.rpc.call_vg_list([oth_node, tgt_node])
    if not results:
      raise errors.OpExecError("Can't list volume groups on the nodes")
    for node in oth_node, tgt_node:
      res = results.get(node, False)
      if not res or my_vg not in res:
        raise errors.OpExecError("Volume group '%s' not found on %s" %
                                 (my_vg, node))
    for dev in instance.disks:
      if not dev.iv_name in self.op.disks:
        continue
      for node in tgt_node, oth_node:
        info("checking %s on %s" % (dev.iv_name, node))
        cfg.SetDiskID(dev, node)
        if not self.rpc.call_blockdev_find(node, dev):
          raise errors.OpExecError("Can't find device %s on node %s" %
                                   (dev.iv_name, node))

    # Step: check other node consistency
    self.proc.LogStep(2, steps_total, "check peer consistency")
    for dev in instance.disks:
      if not dev.iv_name in self.op.disks:
        continue
      info("checking %s consistency on %s" % (dev.iv_name, oth_node))
      if not _CheckDiskConsistency(self, dev, oth_node,
                                   oth_node==instance.primary_node):
        raise errors.OpExecError("Peer node (%s) has degraded storage, unsafe"
                                 " to replace disks on this node (%s)" %
                                 (oth_node, tgt_node))

    # Step: create new storage
    self.proc.LogStep(3, steps_total, "allocate new storage")
    for dev in instance.disks:
      if not dev.iv_name in self.op.disks:
        continue
      size = dev.size
      cfg.SetDiskID(dev, tgt_node)
      lv_names = [".%s_%s" % (dev.iv_name, suf) for suf in ["data", "meta"]]
      names = _GenerateUniqueNames(self, lv_names)
      lv_data = objects.Disk(dev_type=constants.LD_LV, size=size,
                             logical_id=(vgname, names[0]))
      # the meta LV size is a fixed 128 -- presumably MiB, the DRBD
      # metadata size; confirm against objects.Disk/bdev conventions
      lv_meta = objects.Disk(dev_type=constants.LD_LV, size=128,
                             logical_id=(vgname, names[1]))
      new_lvs = [lv_data, lv_meta]
      old_lvs = dev.children
      iv_names[dev.iv_name] = (dev, old_lvs, new_lvs)
      info("creating new local storage on %s for %s" %
           (tgt_node, dev.iv_name))
      # since we *always* want to create this LV, we use the
      # _Create...OnPrimary (which forces the creation), even if we
      # are talking about the secondary node
      for new_lv in new_lvs:
        if not _CreateBlockDevOnPrimary(self, tgt_node, instance, new_lv,
                                        _GetInstanceInfoText(instance)):
          raise errors.OpExecError("Failed to create new LV named '%s' on"
                                   " node '%s'" %
                                   (new_lv.logical_id[1], tgt_node))

    # Step: for each lv, detach+rename*2+attach
    self.proc.LogStep(4, steps_total, "change drbd configuration")
    for dev, old_lvs, new_lvs in iv_names.itervalues():
      info("detaching %s drbd from local storage" % dev.iv_name)
      if not self.rpc.call_blockdev_removechildren(tgt_node, dev, old_lvs):
        raise errors.OpExecError("Can't detach drbd from local storage on node"
                                 " %s for device %s" % (tgt_node, dev.iv_name))
      #dev.children = []
      #cfg.Update(instance)

      # ok, we created the new LVs, so now we know we have the needed
      # storage; as such, we proceed on the target node to rename
      # old_lv to _old, and new_lv to old_lv; note that we rename LVs
      # using the assumption that logical_id == physical_id (which in
      # turn is the unique_id on that node)

      # FIXME(iustin): use a better name for the replaced LVs
      temp_suffix = int(time.time())
      ren_fn = lambda d, suff: (d.physical_id[0],
                                d.physical_id[1] + "_replaced-%s" % suff)
      # build the rename list based on what LVs exist on the node
      rlist = []
      for to_ren in old_lvs:
        find_res = self.rpc.call_blockdev_find(tgt_node, to_ren)
        if find_res is not None: # device exists
          rlist.append((to_ren, ren_fn(to_ren, temp_suffix)))

      info("renaming the old LVs on the target node")
      if not self.rpc.call_blockdev_rename(tgt_node, rlist):
        raise errors.OpExecError("Can't rename old LVs on node %s" % tgt_node)
      # now we rename the new LVs to the old LVs
      info("renaming the new LVs on the target node")
      rlist = [(new, old.physical_id) for old, new in zip(old_lvs, new_lvs)]
      if not self.rpc.call_blockdev_rename(tgt_node, rlist):
        raise errors.OpExecError("Can't rename new LVs on node %s" % tgt_node)

      # update the in-memory config to match the on-node renames
      for old, new in zip(old_lvs, new_lvs):
        new.logical_id = old.logical_id
        cfg.SetDiskID(new, tgt_node)

      for disk in old_lvs:
        disk.logical_id = ren_fn(disk, temp_suffix)
        cfg.SetDiskID(disk, tgt_node)

      # now that the new lvs have the old name, we can add them to the device
      info("adding new mirror component on %s" % tgt_node)
      if not self.rpc.call_blockdev_addchildren(tgt_node, dev, new_lvs):
        # attach failed: try to roll back the freshly created LVs
        for new_lv in new_lvs:
          if not self.rpc.call_blockdev_remove(tgt_node, new_lv):
            warning("Can't rollback device %s", hint="manually cleanup unused"
                    " logical volumes")
        raise errors.OpExecError("Can't add local storage to drbd")

      dev.children = new_lvs
      cfg.Update(instance)

    # Step: wait for sync

    # this can fail as the old devices are degraded and _WaitForSync
    # does a combined result over all disks, so we don't check its
    # return value
    self.proc.LogStep(5, steps_total, "sync devices")
    _WaitForSync(self, instance, unlock=True)

    # so check manually all the devices
    for name, (dev, old_lvs, new_lvs) in iv_names.iteritems():
      cfg.SetDiskID(dev, instance.primary_node)
      # index 5 of the blockdev_find result holds the degraded flag
      is_degr = self.rpc.call_blockdev_find(instance.primary_node, dev)[5]
      if is_degr:
        raise errors.OpExecError("DRBD device %s is degraded!" % name)

    # Step: remove old storage
    self.proc.LogStep(6, steps_total, "removing old storage")
    for name, (dev, old_lvs, new_lvs) in iv_names.iteritems():
      info("remove logical volumes for %s" % name)
      for lv in old_lvs:
        cfg.SetDiskID(lv, tgt_node)
        # best-effort removal: a failure is only warned about
        if not self.rpc.call_blockdev_remove(tgt_node, lv):
          warning("Can't remove old LV", hint="manually remove unused LVs")
          continue
4109 a9e0c397 Iustin Pop
4110 a9e0c397 Iustin Pop
  def _ExecD8Secondary(self, feedback_fn):
4111 a9e0c397 Iustin Pop
    """Replace the secondary node for drbd8.
4112 a9e0c397 Iustin Pop

4113 a9e0c397 Iustin Pop
    The algorithm for replace is quite complicated:
4114 a9e0c397 Iustin Pop
      - for all disks of the instance:
4115 a9e0c397 Iustin Pop
        - create new LVs on the new node with same names
4116 a9e0c397 Iustin Pop
        - shutdown the drbd device on the old secondary
4117 a9e0c397 Iustin Pop
        - disconnect the drbd network on the primary
4118 a9e0c397 Iustin Pop
        - create the drbd device on the new secondary
4119 a9e0c397 Iustin Pop
        - network attach the drbd on the primary, using an artifice:
4120 a9e0c397 Iustin Pop
          the drbd code for Attach() will connect to the network if it
4121 a9e0c397 Iustin Pop
          finds a device which is connected to the good local disks but
4122 a9e0c397 Iustin Pop
          not network enabled
4123 a9e0c397 Iustin Pop
      - wait for sync across all devices
4124 a9e0c397 Iustin Pop
      - remove all disks from the old secondary
4125 a9e0c397 Iustin Pop

4126 a9e0c397 Iustin Pop
    Failures are not very well handled.
4127 0834c866 Iustin Pop

4128 a9e0c397 Iustin Pop
    """
4129 0834c866 Iustin Pop
    steps_total = 6
4130 5bfac263 Iustin Pop
    warning, info = (self.proc.LogWarning, self.proc.LogInfo)
4131 a9e0c397 Iustin Pop
    instance = self.instance
4132 a9e0c397 Iustin Pop
    iv_names = {}
4133 a9e0c397 Iustin Pop
    vgname = self.cfg.GetVGName()
4134 a9e0c397 Iustin Pop
    # start of work
4135 a9e0c397 Iustin Pop
    cfg = self.cfg
4136 a9e0c397 Iustin Pop
    old_node = self.tgt_node
4137 a9e0c397 Iustin Pop
    new_node = self.new_node
4138 a9e0c397 Iustin Pop
    pri_node = instance.primary_node
4139 0834c866 Iustin Pop
4140 0834c866 Iustin Pop
    # Step: check device activation
4141 5bfac263 Iustin Pop
    self.proc.LogStep(1, steps_total, "check device existence")
4142 0834c866 Iustin Pop
    info("checking volume groups")
4143 0834c866 Iustin Pop
    my_vg = cfg.GetVGName()
4144 72737a7f Iustin Pop
    results = self.rpc.call_vg_list([pri_node, new_node])
4145 0834c866 Iustin Pop
    if not results:
4146 0834c866 Iustin Pop
      raise errors.OpExecError("Can't list volume groups on the nodes")
4147 0834c866 Iustin Pop
    for node in pri_node, new_node:
4148 0834c866 Iustin Pop
      res = results.get(node, False)
4149 0834c866 Iustin Pop
      if not res or my_vg not in res:
4150 0834c866 Iustin Pop
        raise errors.OpExecError("Volume group '%s' not found on %s" %
4151 0834c866 Iustin Pop
                                 (my_vg, node))
4152 0834c866 Iustin Pop
    for dev in instance.disks:
4153 0834c866 Iustin Pop
      if not dev.iv_name in self.op.disks:
4154 0834c866 Iustin Pop
        continue
4155 0834c866 Iustin Pop
      info("checking %s on %s" % (dev.iv_name, pri_node))
4156 0834c866 Iustin Pop
      cfg.SetDiskID(dev, pri_node)
4157 72737a7f Iustin Pop
      if not self.rpc.call_blockdev_find(pri_node, dev):
4158 0834c866 Iustin Pop
        raise errors.OpExecError("Can't find device %s on node %s" %
4159 0834c866 Iustin Pop
                                 (dev.iv_name, pri_node))
4160 0834c866 Iustin Pop
4161 0834c866 Iustin Pop
    # Step: check other node consistency
4162 5bfac263 Iustin Pop
    self.proc.LogStep(2, steps_total, "check peer consistency")
4163 0834c866 Iustin Pop
    for dev in instance.disks:
4164 0834c866 Iustin Pop
      if not dev.iv_name in self.op.disks:
4165 0834c866 Iustin Pop
        continue
4166 0834c866 Iustin Pop
      info("checking %s consistency on %s" % (dev.iv_name, pri_node))
4167 b9bddb6b Iustin Pop
      if not _CheckDiskConsistency(self, dev, pri_node, True, ldisk=True):
4168 0834c866 Iustin Pop
        raise errors.OpExecError("Primary node (%s) has degraded storage,"
4169 0834c866 Iustin Pop
                                 " unsafe to replace the secondary" %
4170 0834c866 Iustin Pop
                                 pri_node)
4171 0834c866 Iustin Pop
4172 0834c866 Iustin Pop
    # Step: create new storage
4173 5bfac263 Iustin Pop
    self.proc.LogStep(3, steps_total, "allocate new storage")
4174 468b46f9 Iustin Pop
    for dev in instance.disks:
4175 a9e0c397 Iustin Pop
      size = dev.size
4176 0834c866 Iustin Pop
      info("adding new local storage on %s for %s" % (new_node, dev.iv_name))
4177 a9e0c397 Iustin Pop
      # since we *always* want to create this LV, we use the
4178 a9e0c397 Iustin Pop
      # _Create...OnPrimary (which forces the creation), even if we
4179 a9e0c397 Iustin Pop
      # are talking about the secondary node
4180 a9e0c397 Iustin Pop
      for new_lv in dev.children:
4181 b9bddb6b Iustin Pop
        if not _CreateBlockDevOnPrimary(self, new_node, instance, new_lv,
4182 a9e0c397 Iustin Pop
                                        _GetInstanceInfoText(instance)):
4183 a9e0c397 Iustin Pop
          raise errors.OpExecError("Failed to create new LV named '%s' on"
4184 a9e0c397 Iustin Pop
                                   " node '%s'" %
4185 a9e0c397 Iustin Pop
                                   (new_lv.logical_id[1], new_node))
4186 a9e0c397 Iustin Pop
4187 0834c866 Iustin Pop
4188 468b46f9 Iustin Pop
    # Step 4: dbrd minors and drbd setups changes
4189 a1578d63 Iustin Pop
    # after this, we must manually remove the drbd minors on both the
4190 a1578d63 Iustin Pop
    # error and the success paths
4191 a1578d63 Iustin Pop
    minors = cfg.AllocateDRBDMinor([new_node for dev in instance.disks],
4192 a1578d63 Iustin Pop
                                   instance.name)
4193 468b46f9 Iustin Pop
    logging.debug("Allocated minors %s" % (minors,))
4194 5bfac263 Iustin Pop
    self.proc.LogStep(4, steps_total, "changing drbd configuration")
4195 468b46f9 Iustin Pop
    for dev, new_minor in zip(instance.disks, minors):
4196 0834c866 Iustin Pop
      size = dev.size
4197 0834c866 Iustin Pop
      info("activating a new drbd on %s for %s" % (new_node, dev.iv_name))
4198 a9e0c397 Iustin Pop
      # create new devices on new_node
4199 ffa1c0dc Iustin Pop
      if pri_node == dev.logical_id[0]:
4200 ffa1c0dc Iustin Pop
        new_logical_id = (pri_node, new_node,
4201 f9518d38 Iustin Pop
                          dev.logical_id[2], dev.logical_id[3], new_minor,
4202 f9518d38 Iustin Pop
                          dev.logical_id[5])
4203 ffa1c0dc Iustin Pop
      else:
4204 ffa1c0dc Iustin Pop
        new_logical_id = (new_node, pri_node,
4205 f9518d38 Iustin Pop
                          dev.logical_id[2], new_minor, dev.logical_id[4],
4206 f9518d38 Iustin Pop
                          dev.logical_id[5])
4207 468b46f9 Iustin Pop
      iv_names[dev.iv_name] = (dev, dev.children, new_logical_id)
4208 a1578d63 Iustin Pop
      logging.debug("Allocated new_minor: %s, new_logical_id: %s", new_minor,
4209 a1578d63 Iustin Pop
                    new_logical_id)
4210 a9e0c397 Iustin Pop
      new_drbd = objects.Disk(dev_type=constants.LD_DRBD8,
4211 ffa1c0dc Iustin Pop
                              logical_id=new_logical_id,
4212 a9e0c397 Iustin Pop
                              children=dev.children)
4213 b9bddb6b Iustin Pop
      if not _CreateBlockDevOnSecondary(self, new_node, instance,
4214 3f78eef2 Iustin Pop
                                        new_drbd, False,
4215 b9bddb6b Iustin Pop
                                        _GetInstanceInfoText(instance)):
4216 a1578d63 Iustin Pop
        self.cfg.ReleaseDRBDMinors(instance.name)
4217 a9e0c397 Iustin Pop
        raise errors.OpExecError("Failed to create new DRBD on"
4218 a9e0c397 Iustin Pop
                                 " node '%s'" % new_node)
4219 a9e0c397 Iustin Pop
4220 0834c866 Iustin Pop
    for dev in instance.disks:
4221 a9e0c397 Iustin Pop
      # we have new devices, shutdown the drbd on the old secondary
4222 0834c866 Iustin Pop
      info("shutting down drbd for %s on old node" % dev.iv_name)
4223 a9e0c397 Iustin Pop
      cfg.SetDiskID(dev, old_node)
4224 72737a7f Iustin Pop
      if not self.rpc.call_blockdev_shutdown(old_node, dev):
4225 0834c866 Iustin Pop
        warning("Failed to shutdown drbd for %s on old node" % dev.iv_name,
4226 79caa9ed Guido Trotter
                hint="Please cleanup this device manually as soon as possible")
4227 a9e0c397 Iustin Pop
4228 642445d9 Iustin Pop
    info("detaching primary drbds from the network (=> standalone)")
4229 642445d9 Iustin Pop
    done = 0
4230 642445d9 Iustin Pop
    for dev in instance.disks:
4231 a9e0c397 Iustin Pop
      cfg.SetDiskID(dev, pri_node)
4232 f9518d38 Iustin Pop
      # set the network part of the physical (unique in bdev terms) id
4233 f9518d38 Iustin Pop
      # to None, meaning detach from network
4234 f9518d38 Iustin Pop
      dev.physical_id = (None, None, None, None) + dev.physical_id[4:]
4235 642445d9 Iustin Pop
      # and 'find' the device, which will 'fix' it to match the
4236 642445d9 Iustin Pop
      # standalone state
4237 72737a7f Iustin Pop
      if self.rpc.call_blockdev_find(pri_node, dev):
4238 642445d9 Iustin Pop
        done += 1
4239 642445d9 Iustin Pop
      else:
4240 642445d9 Iustin Pop
        warning("Failed to detach drbd %s from network, unusual case" %
4241 642445d9 Iustin Pop
                dev.iv_name)
4242 642445d9 Iustin Pop
4243 642445d9 Iustin Pop
    if not done:
4244 642445d9 Iustin Pop
      # no detaches succeeded (very unlikely)
4245 a1578d63 Iustin Pop
      self.cfg.ReleaseDRBDMinors(instance.name)
4246 642445d9 Iustin Pop
      raise errors.OpExecError("Can't detach at least one DRBD from old node")
4247 642445d9 Iustin Pop
4248 642445d9 Iustin Pop
    # if we managed to detach at least one, we update all the disks of
4249 642445d9 Iustin Pop
    # the instance to point to the new secondary
4250 642445d9 Iustin Pop
    info("updating instance configuration")
4251 468b46f9 Iustin Pop
    for dev, _, new_logical_id in iv_names.itervalues():
4252 468b46f9 Iustin Pop
      dev.logical_id = new_logical_id
4253 a9e0c397 Iustin Pop
      cfg.SetDiskID(dev, pri_node)
4254 642445d9 Iustin Pop
    cfg.Update(instance)
4255 a1578d63 Iustin Pop
    # we can remove now the temp minors as now the new values are
4256 a1578d63 Iustin Pop
    # written to the config file (and therefore stable)
4257 a1578d63 Iustin Pop
    self.cfg.ReleaseDRBDMinors(instance.name)
4258 a9e0c397 Iustin Pop
4259 642445d9 Iustin Pop
    # and now perform the drbd attach
4260 642445d9 Iustin Pop
    info("attaching primary drbds to new secondary (standalone => connected)")
4261 642445d9 Iustin Pop
    failures = []
4262 642445d9 Iustin Pop
    for dev in instance.disks:
4263 642445d9 Iustin Pop
      info("attaching primary drbd for %s to new secondary node" % dev.iv_name)
4264 642445d9 Iustin Pop
      # since the attach is smart, it's enough to 'find' the device,
4265 642445d9 Iustin Pop
      # it will automatically activate the network, if the physical_id
4266 642445d9 Iustin Pop
      # is correct
4267 642445d9 Iustin Pop
      cfg.SetDiskID(dev, pri_node)
4268 ffa1c0dc Iustin Pop
      logging.debug("Disk to attach: %s", dev)
4269 72737a7f Iustin Pop
      if not self.rpc.call_blockdev_find(pri_node, dev):
4270 642445d9 Iustin Pop
        warning("can't attach drbd %s to new secondary!" % dev.iv_name,
4271 642445d9 Iustin Pop
                "please do a gnt-instance info to see the status of disks")
4272 a9e0c397 Iustin Pop
4273 a9e0c397 Iustin Pop
    # this can fail as the old devices are degraded and _WaitForSync
4274 a9e0c397 Iustin Pop
    # does a combined result over all disks, so we don't check its
4275 a9e0c397 Iustin Pop
    # return value
4276 5bfac263 Iustin Pop
    self.proc.LogStep(5, steps_total, "sync devices")
4277 b9bddb6b Iustin Pop
    _WaitForSync(self, instance, unlock=True)
4278 a9e0c397 Iustin Pop
4279 a9e0c397 Iustin Pop
    # so check manually all the devices
4280 ffa1c0dc Iustin Pop
    for name, (dev, old_lvs, _) in iv_names.iteritems():
4281 a9e0c397 Iustin Pop
      cfg.SetDiskID(dev, pri_node)
4282 72737a7f Iustin Pop
      is_degr = self.rpc.call_blockdev_find(pri_node, dev)[5]
4283 a9e0c397 Iustin Pop
      if is_degr:
4284 a9e0c397 Iustin Pop
        raise errors.OpExecError("DRBD device %s is degraded!" % name)
4285 a9e0c397 Iustin Pop
4286 5bfac263 Iustin Pop
    self.proc.LogStep(6, steps_total, "removing old storage")
4287 ffa1c0dc Iustin Pop
    for name, (dev, old_lvs, _) in iv_names.iteritems():
4288 0834c866 Iustin Pop
      info("remove logical volumes for %s" % name)
4289 a9e0c397 Iustin Pop
      for lv in old_lvs:
4290 a9e0c397 Iustin Pop
        cfg.SetDiskID(lv, old_node)
4291 72737a7f Iustin Pop
        if not self.rpc.call_blockdev_remove(old_node, lv):
4292 0834c866 Iustin Pop
          warning("Can't remove LV on old secondary",
4293 79caa9ed Guido Trotter
                  hint="Cleanup stale volumes by hand")
4294 a9e0c397 Iustin Pop
4295 a9e0c397 Iustin Pop
  def Exec(self, feedback_fn):
    """Execute disk replacement.

    This dispatches the disk replacement to the appropriate handler.

    """
    instance = self.instance

    # A down instance has its disks deactivated; bring them up for the
    # duration of the replacement
    if instance.status == "down":
      _StartInstanceDisks(self, instance, True)

    if instance.disk_template != constants.DT_DRBD8:
      raise errors.ProgrammerError("Unhandled disk replacement case")

    # without a remote node we only replace the local storage; with one,
    # we move the secondary to that node
    if self.op.remote_node is None:
      handler = self._ExecD8DiskOnly
    else:
      handler = self._ExecD8Secondary

    ret = handler(feedback_fn)

    # Restore the original (down) disk state afterwards
    if instance.status == "down":
      _SafeShutdownInstanceDisks(self, instance)

    return ret
4322 a9e0c397 Iustin Pop
4323 a8083063 Iustin Pop
4324 8729e0d7 Iustin Pop
class LUGrowDisk(LogicalUnit):
  """Grow a disk of an instance.

  """
  HPATH = "disk-grow"
  HTYPE = constants.HTYPE_INSTANCE
  _OP_REQP = ["instance_name", "disk", "amount", "wait_for_sync"]
  REQ_BGL = False

  def ExpandNames(self):
    self._ExpandAndLockInstance()
    # node locks are computed from the instance's nodes once the
    # instance lock is held
    self.needed_locks[locking.LEVEL_NODE] = []
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE

  def DeclareLocks(self, level):
    if level == locking.LEVEL_NODE:
      self._LockInstancesNodes()

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on the master, the primary and all the secondaries.

    """
    env = {
      "DISK": self.op.disk,
      "AMOUNT": self.op.amount,
      }
    env.update(_BuildInstanceHookEnvByObject(self, self.instance))
    nl = [
      self.cfg.GetMasterNode(),
      self.instance.primary_node,
      ]
    return env, nl, nl

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster, that its disk
    template supports growing, that the named disk exists, and that
    every involved node reports enough free space in its volume group.

    """
    instance = self.cfg.GetInstanceInfo(self.op.instance_name)
    assert instance is not None, \
      "Cannot retrieve locked instance %s" % self.op.instance_name

    self.instance = instance

    if instance.disk_template not in (constants.DT_PLAIN, constants.DT_DRBD8):
      raise errors.OpPrereqError("Instance's disk layout does not support"
                                 " growing.")

    if instance.FindDisk(self.op.disk) is None:
      raise errors.OpPrereqError("Disk '%s' not found for instance '%s'" %
                                 (self.op.disk, instance.name))

    nodenames = [instance.primary_node] + list(instance.secondary_nodes)
    nodeinfo = self.rpc.call_node_info(nodenames, self.cfg.GetVGName(),
                                       instance.hypervisor)
    for node in nodenames:
      info = nodeinfo.get(node, None)
      if not info:
        raise errors.OpPrereqError("Cannot get current information"
                                   " from node '%s'" % node)
      vg_free = info.get('vg_free', None)
      if not isinstance(vg_free, int):
        raise errors.OpPrereqError("Can't compute free disk space on"
                                   " node %s" % node)
      if self.op.amount > info['vg_free']:
        raise errors.OpPrereqError("Not enough disk space on target node %s:"
                                   " %d MiB available, %d MiB required" %
                                   (node, info['vg_free'], self.op.amount))

  def Exec(self, feedback_fn):
    """Execute disk grow.

    """
    instance = self.instance
    disk = instance.FindDisk(self.op.disk)
    # grow the device on all nodes; the secondaries come first, the
    # primary last
    for node in (instance.secondary_nodes + (instance.primary_node,)):
      self.cfg.SetDiskID(disk, node)
      result = self.rpc.call_blockdev_grow(node, disk, self.op.amount)
      if (not result or not isinstance(result, (list, tuple)) or
          len(result) != 2):
        raise errors.OpExecError("grow request failed to node %s" % node)
      elif not result[0]:
        raise errors.OpExecError("grow request failed to node %s: %s" %
                                 (node, result[1]))
    disk.RecordGrow(self.op.amount)
    self.cfg.Update(instance)
    if self.op.wait_for_sync:
      # FIX: _WaitForSync takes the LU itself as its first argument (it
      # accesses lu.cfg/lu.proc internally; see the call in the disk
      # replacement code) -- the old "(self.cfg, instance, self.proc)"
      # call used the pre-refactoring signature
      disk_abort = not _WaitForSync(self, instance)
      if disk_abort:
        self.proc.LogWarning("Warning: disk sync-ing has not returned a good"
                             " status.\nPlease check the instance.")
4418 8729e0d7 Iustin Pop
4419 8729e0d7 Iustin Pop
4420 a8083063 Iustin Pop
class LUQueryInstanceData(NoHooksLU):
  """Query runtime instance data.

  """
  _OP_REQP = ["instances", "static"]
  REQ_BGL = False

  def ExpandNames(self):
    self.needed_locks = {}
    # all locks are acquired in shared mode: this is a read-only query
    self.share_locks = dict(((i, 1) for i in locking.LEVELS))

    if not isinstance(self.op.instances, list):
      raise errors.OpPrereqError("Invalid argument type 'instances'")

    if self.op.instances:
      self.wanted_names = []
      for name in self.op.instances:
        full_name = self.cfg.ExpandInstanceName(name)
        if full_name is None:
          # FIX: report the name that failed to expand; this opcode has
          # no 'instance_name' attribute (see _OP_REQP), so the old
          # "self.op.instance_name" broke the error path itself
          raise errors.OpPrereqError("Instance '%s' not known" % name)
        self.wanted_names.append(full_name)
      self.needed_locks[locking.LEVEL_INSTANCE] = self.wanted_names
    else:
      # no instance list given: query all instances
      self.wanted_names = None
      self.needed_locks[locking.LEVEL_INSTANCE] = locking.ALL_SET

    self.needed_locks[locking.LEVEL_NODE] = []
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE

  def DeclareLocks(self, level):
    if level == locking.LEVEL_NODE:
      self._LockInstancesNodes()

  def CheckPrereq(self):
    """Check prerequisites.

    This only checks the optional instance list against the existing names.

    """
    if self.wanted_names is None:
      # "all instances" case: the lock names are the instance names
      self.wanted_names = self.acquired_locks[locking.LEVEL_INSTANCE]

    self.wanted_instances = [self.cfg.GetInstanceInfo(name) for name
                             in self.wanted_names]

  def _ComputeDiskStatus(self, instance, snode, dev):
    """Compute block device status.

    Recurses into the device's children; device status is only queried
    over RPC when the query is not static.

    """
    static = self.op.static
    if not static:
      self.cfg.SetDiskID(dev, instance.primary_node)
      dev_pstatus = self.rpc.call_blockdev_find(instance.primary_node, dev)
    else:
      dev_pstatus = None

    if dev.dev_type in constants.LDS_DRBD:
      # we change the snode then (otherwise we use the one passed in)
      if dev.logical_id[0] == instance.primary_node:
        snode = dev.logical_id[1]
      else:
        snode = dev.logical_id[0]

    if snode and not static:
      self.cfg.SetDiskID(dev, snode)
      dev_sstatus = self.rpc.call_blockdev_find(snode, dev)
    else:
      dev_sstatus = None

    if dev.children:
      dev_children = [self._ComputeDiskStatus(instance, snode, child)
                      for child in dev.children]
    else:
      dev_children = []

    data = {
      "iv_name": dev.iv_name,
      "dev_type": dev.dev_type,
      "logical_id": dev.logical_id,
      "physical_id": dev.physical_id,
      "pstatus": dev_pstatus,
      "sstatus": dev_sstatus,
      "children": dev_children,
      }

    return data

  def Exec(self, feedback_fn):
    """Gather and return data"""
    result = {}

    cluster = self.cfg.GetClusterInfo()

    for instance in self.wanted_instances:
      if not self.op.static:
        # live query: ask the primary node whether the instance runs
        remote_info = self.rpc.call_instance_info(instance.primary_node,
                                                  instance.name,
                                                  instance.hypervisor)
        if remote_info and "state" in remote_info:
          remote_state = "up"
        else:
          remote_state = "down"
      else:
        remote_state = None
      if instance.status == "down":
        config_state = "down"
      else:
        config_state = "up"

      disks = [self._ComputeDiskStatus(instance, None, device)
               for device in instance.disks]

      idict = {
        "name": instance.name,
        "config_state": config_state,
        "run_state": remote_state,
        "pnode": instance.primary_node,
        "snodes": instance.secondary_nodes,
        "os": instance.os,
        "nics": [(nic.mac, nic.ip, nic.bridge) for nic in instance.nics],
        "disks": disks,
        "hypervisor": instance.hypervisor,
        "network_port": instance.network_port,
        "hv_instance": instance.hvparams,
        "hv_actual": cluster.FillHV(instance),
        "be_instance": instance.beparams,
        "be_actual": cluster.FillBE(instance),
        }

      result[instance.name] = idict

    return result
4554 a8083063 Iustin Pop
4555 a8083063 Iustin Pop
4556 7767bbf5 Manuel Franceschini
class LUSetInstanceParams(LogicalUnit):
4557 a8083063 Iustin Pop
  """Modifies an instances's parameters.
4558 a8083063 Iustin Pop

4559 a8083063 Iustin Pop
  """
4560 a8083063 Iustin Pop
  HPATH = "instance-modify"
4561 a8083063 Iustin Pop
  HTYPE = constants.HTYPE_INSTANCE
4562 74409b12 Iustin Pop
  _OP_REQP = ["instance_name", "hvparams"]
4563 1a5c7281 Guido Trotter
  REQ_BGL = False
4564 1a5c7281 Guido Trotter
4565 1a5c7281 Guido Trotter
  def ExpandNames(self):
4566 1a5c7281 Guido Trotter
    self._ExpandAndLockInstance()
4567 74409b12 Iustin Pop
    self.needed_locks[locking.LEVEL_NODE] = []
4568 74409b12 Iustin Pop
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
4569 74409b12 Iustin Pop
4570 74409b12 Iustin Pop
4571 74409b12 Iustin Pop
  def DeclareLocks(self, level):
4572 74409b12 Iustin Pop
    if level == locking.LEVEL_NODE:
4573 74409b12 Iustin Pop
      self._LockInstancesNodes()
4574 a8083063 Iustin Pop
4575 a8083063 Iustin Pop
  def BuildHooksEnv(self):
4576 a8083063 Iustin Pop
    """Build hooks env.
4577 a8083063 Iustin Pop

4578 a8083063 Iustin Pop
    This runs on the master, primary and secondaries.
4579 a8083063 Iustin Pop

4580 a8083063 Iustin Pop
    """
4581 396e1b78 Michael Hanselmann
    args = dict()
4582 338e51e8 Iustin Pop
    if constants.BE_MEMORY in self.be_new:
4583 338e51e8 Iustin Pop
      args['memory'] = self.be_new[constants.BE_MEMORY]
4584 338e51e8 Iustin Pop
    if constants.BE_VCPUS in self.be_new:
4585 61be6ba4 Iustin Pop
      args['vcpus'] = self.be_new[constants.BE_VCPUS]
4586 ef756965 Iustin Pop
    if self.do_ip or self.do_bridge or self.mac:
4587 396e1b78 Michael Hanselmann
      if self.do_ip:
4588 396e1b78 Michael Hanselmann
        ip = self.ip
4589 396e1b78 Michael Hanselmann
      else:
4590 396e1b78 Michael Hanselmann
        ip = self.instance.nics[0].ip
4591 396e1b78 Michael Hanselmann
      if self.bridge:
4592 396e1b78 Michael Hanselmann
        bridge = self.bridge
4593 396e1b78 Michael Hanselmann
      else:
4594 396e1b78 Michael Hanselmann
        bridge = self.instance.nics[0].bridge
4595 ef756965 Iustin Pop
      if self.mac:
4596 ef756965 Iustin Pop
        mac = self.mac
4597 ef756965 Iustin Pop
      else:
4598 ef756965 Iustin Pop
        mac = self.instance.nics[0].mac
4599 ef756965 Iustin Pop
      args['nics'] = [(ip, bridge, mac)]
4600 338e51e8 Iustin Pop
    env = _BuildInstanceHookEnvByObject(self, self.instance, override=args)
4601 d6a02168 Michael Hanselmann
    nl = [self.cfg.GetMasterNode(),
4602 a8083063 Iustin Pop
          self.instance.primary_node] + list(self.instance.secondary_nodes)
4603 a8083063 Iustin Pop
    return env, nl, nl
4604 a8083063 Iustin Pop
4605 a8083063 Iustin Pop
  def CheckPrereq(self):
4606 a8083063 Iustin Pop
    """Check prerequisites.
4607 a8083063 Iustin Pop

4608 a8083063 Iustin Pop
    This only checks the instance list against the existing names.
4609 a8083063 Iustin Pop

4610 a8083063 Iustin Pop
    """
4611 1a5c7281 Guido Trotter
    # FIXME: all the parameters could be checked before, in ExpandNames, or in
4612 1a5c7281 Guido Trotter
    # a separate CheckArguments function, if we implement one, so the operation
4613 1a5c7281 Guido Trotter
    # can be aborted without waiting for any lock, should it have an error...
4614 a8083063 Iustin Pop
    self.ip = getattr(self.op, "ip", None)
4615 1862d460 Alexander Schreiber
    self.mac = getattr(self.op, "mac", None)
4616 a8083063 Iustin Pop
    self.bridge = getattr(self.op, "bridge", None)
4617 973d7867 Iustin Pop
    self.kernel_path = getattr(self.op, "kernel_path", None)
4618 973d7867 Iustin Pop
    self.initrd_path = getattr(self.op, "initrd_path", None)
4619 4300c4b6 Guido Trotter
    self.force = getattr(self.op, "force", None)
4620 338e51e8 Iustin Pop
    all_parms = [self.ip, self.bridge, self.mac]
4621 338e51e8 Iustin Pop
    if (all_parms.count(None) == len(all_parms) and
4622 338e51e8 Iustin Pop
        not self.op.hvparams and
4623 338e51e8 Iustin Pop
        not self.op.beparams):
4624 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("No changes submitted")
4625 338e51e8 Iustin Pop
    for item in (constants.BE_MEMORY, constants.BE_VCPUS):
4626 338e51e8 Iustin Pop
      val = self.op.beparams.get(item, None)
4627 338e51e8 Iustin Pop
      if val is not None:
4628 338e51e8 Iustin Pop
        try:
4629 338e51e8 Iustin Pop
          val = int(val)
4630 338e51e8 Iustin Pop
        except ValueError, err:
4631 338e51e8 Iustin Pop
          raise errors.OpPrereqError("Invalid %s size: %s" % (item, str(err)))
4632 338e51e8 Iustin Pop
        self.op.beparams[item] = val
4633 a8083063 Iustin Pop
    if self.ip is not None:
4634 a8083063 Iustin Pop
      self.do_ip = True
4635 a8083063 Iustin Pop
      if self.ip.lower() == "none":
4636 a8083063 Iustin Pop
        self.ip = None
4637 a8083063 Iustin Pop
      else:
4638 a8083063 Iustin Pop
        if not utils.IsValidIP(self.ip):
4639 3ecf6786 Iustin Pop
          raise errors.OpPrereqError("Invalid IP address '%s'." % self.ip)
4640 a8083063 Iustin Pop
    else:
4641 a8083063 Iustin Pop
      self.do_ip = False
4642 ecb215b5 Michael Hanselmann
    self.do_bridge = (self.bridge is not None)
4643 1862d460 Alexander Schreiber
    if self.mac is not None:
4644 1862d460 Alexander Schreiber
      if self.cfg.IsMacInUse(self.mac):
4645 1862d460 Alexander Schreiber
        raise errors.OpPrereqError('MAC address %s already in use in cluster' %
4646 1862d460 Alexander Schreiber
                                   self.mac)
4647 1862d460 Alexander Schreiber
      if not utils.IsValidMac(self.mac):
4648 1862d460 Alexander Schreiber
        raise errors.OpPrereqError('Invalid MAC address %s' % self.mac)
4649 a8083063 Iustin Pop
4650 74409b12 Iustin Pop
    # checking the new params on the primary/secondary nodes
4651 31a853d2 Iustin Pop
4652 cfefe007 Guido Trotter
    instance = self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
4653 1a5c7281 Guido Trotter
    assert self.instance is not None, \
4654 1a5c7281 Guido Trotter
      "Cannot retrieve locked instance %s" % self.op.instance_name
4655 74409b12 Iustin Pop
    pnode = self.instance.primary_node
4656 74409b12 Iustin Pop
    nodelist = [pnode]
4657 74409b12 Iustin Pop
    nodelist.extend(instance.secondary_nodes)
4658 74409b12 Iustin Pop
4659 338e51e8 Iustin Pop
    # hvparams processing
4660 74409b12 Iustin Pop
    if self.op.hvparams:
4661 74409b12 Iustin Pop
      i_hvdict = copy.deepcopy(instance.hvparams)
4662 74409b12 Iustin Pop
      for key, val in self.op.hvparams.iteritems():
4663 74409b12 Iustin Pop
        if val is None:
4664 74409b12 Iustin Pop
          try:
4665 74409b12 Iustin Pop
            del i_hvdict[key]
4666 74409b12 Iustin Pop
          except KeyError:
4667 74409b12 Iustin Pop
            pass
4668 74409b12 Iustin Pop
        else:
4669 74409b12 Iustin Pop
          i_hvdict[key] = val
4670 74409b12 Iustin Pop
      cluster = self.cfg.GetClusterInfo()
4671 74409b12 Iustin Pop
      hv_new = cluster.FillDict(cluster.hvparams[instance.hypervisor],
4672 74409b12 Iustin Pop
                                i_hvdict)
4673 74409b12 Iustin Pop
      # local check
4674 74409b12 Iustin Pop
      hypervisor.GetHypervisor(
4675 74409b12 Iustin Pop
        instance.hypervisor).CheckParameterSyntax(hv_new)
4676 74409b12 Iustin Pop
      _CheckHVParams(self, nodelist, instance.hypervisor, hv_new)
4677 338e51e8 Iustin Pop
      self.hv_new = hv_new # the new actual values
4678 338e51e8 Iustin Pop
      self.hv_inst = i_hvdict # the new dict (without defaults)
4679 338e51e8 Iustin Pop
    else:
4680 338e51e8 Iustin Pop
      self.hv_new = self.hv_inst = {}
4681 338e51e8 Iustin Pop
4682 338e51e8 Iustin Pop
    # beparams processing
4683 338e51e8 Iustin Pop
    if self.op.beparams:
4684 338e51e8 Iustin Pop
      i_bedict = copy.deepcopy(instance.beparams)
4685 338e51e8 Iustin Pop
      for key, val in self.op.beparams.iteritems():
4686 338e51e8 Iustin Pop
        if val is None:
4687 338e51e8 Iustin Pop
          try:
4688 338e51e8 Iustin Pop
            del i_bedict[key]
4689 338e51e8 Iustin Pop
          except KeyError:
4690 338e51e8 Iustin Pop
            pass
4691 338e51e8 Iustin Pop
        else:
4692 338e51e8 Iustin Pop
          i_bedict[key] = val
4693 338e51e8 Iustin Pop
      cluster = self.cfg.GetClusterInfo()
4694 338e51e8 Iustin Pop
      be_new = cluster.FillDict(cluster.beparams[constants.BEGR_DEFAULT],
4695 338e51e8 Iustin Pop
                                i_bedict)
4696 338e51e8 Iustin Pop
      self.be_new = be_new # the new actual values
4697 338e51e8 Iustin Pop
      self.be_inst = i_bedict # the new dict (without defaults)
4698 338e51e8 Iustin Pop
    else:
4699 338e51e8 Iustin Pop
      self.hv_new = self.hv_inst = {}
4700 74409b12 Iustin Pop
4701 cfefe007 Guido Trotter
    self.warn = []
4702 647a5d80 Iustin Pop
4703 338e51e8 Iustin Pop
    if constants.BE_MEMORY in self.op.beparams and not self.force:
4704 647a5d80 Iustin Pop
      mem_check_list = [pnode]
4705 c0f2b229 Iustin Pop
      if be_new[constants.BE_AUTO_BALANCE]:
4706 c0f2b229 Iustin Pop
        # either we changed auto_balance to yes or it was from before
4707 647a5d80 Iustin Pop
        mem_check_list.extend(instance.secondary_nodes)
4708 72737a7f Iustin Pop
      instance_info = self.rpc.call_instance_info(pnode, instance.name,
4709 72737a7f Iustin Pop
                                                  instance.hypervisor)
4710 647a5d80 Iustin Pop
      nodeinfo = self.rpc.call_node_info(mem_check_list, self.cfg.GetVGName(),
4711 72737a7f Iustin Pop
                                         instance.hypervisor)
4712 cfefe007 Guido Trotter
4713 cfefe007 Guido Trotter
      if pnode not in nodeinfo or not isinstance(nodeinfo[pnode], dict):
4714 cfefe007 Guido Trotter
        # Assume the primary node is unreachable and go ahead
4715 cfefe007 Guido Trotter
        self.warn.append("Can't get info from primary node %s" % pnode)
4716 cfefe007 Guido Trotter
      else:
4717 cfefe007 Guido Trotter
        if instance_info:
4718 cfefe007 Guido Trotter
          current_mem = instance_info['memory']
4719 cfefe007 Guido Trotter
        else:
4720 cfefe007 Guido Trotter
          # Assume instance not running
4721 cfefe007 Guido Trotter
          # (there is a slight race condition here, but it's not very probable,
4722 cfefe007 Guido Trotter
          # and we have no other way to check)
4723 cfefe007 Guido Trotter
          current_mem = 0
4724 338e51e8 Iustin Pop
        miss_mem = (be_new[constants.BE_MEMORY] - current_mem -
4725 338e51e8 Iustin Pop
                    nodeinfo[pnode]['memory_free'])
4726 cfefe007 Guido Trotter
        if miss_mem > 0:
4727 cfefe007 Guido Trotter
          raise errors.OpPrereqError("This change will prevent the instance"
4728 cfefe007 Guido Trotter
                                     " from starting, due to %d MB of memory"
4729 cfefe007 Guido Trotter
                                     " missing on its primary node" % miss_mem)
4730 cfefe007 Guido Trotter
4731 c0f2b229 Iustin Pop
      if be_new[constants.BE_AUTO_BALANCE]:
4732 647a5d80 Iustin Pop
        for node in instance.secondary_nodes:
4733 647a5d80 Iustin Pop
          if node not in nodeinfo or not isinstance(nodeinfo[node], dict):
4734 647a5d80 Iustin Pop
            self.warn.append("Can't get info from secondary node %s" % node)
4735 647a5d80 Iustin Pop
          elif be_new[constants.BE_MEMORY] > nodeinfo[node]['memory_free']:
4736 647a5d80 Iustin Pop
            self.warn.append("Not enough memory to failover instance to"
4737 647a5d80 Iustin Pop
                             " secondary node %s" % node)
4738 5bc84f33 Alexander Schreiber
4739 a8083063 Iustin Pop
    return
4740 a8083063 Iustin Pop
4741 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
4742 a8083063 Iustin Pop
    """Modifies an instance.
4743 a8083063 Iustin Pop

4744 a8083063 Iustin Pop
    All parameters take effect only at the next restart of the instance.
4745 a8083063 Iustin Pop
    """
4746 cfefe007 Guido Trotter
    # Process here the warnings from CheckPrereq, as we don't have a
4747 cfefe007 Guido Trotter
    # feedback_fn there.
4748 cfefe007 Guido Trotter
    for warn in self.warn:
4749 cfefe007 Guido Trotter
      feedback_fn("WARNING: %s" % warn)
4750 cfefe007 Guido Trotter
4751 a8083063 Iustin Pop
    result = []
4752 a8083063 Iustin Pop
    instance = self.instance
4753 a8083063 Iustin Pop
    if self.do_ip:
4754 a8083063 Iustin Pop
      instance.nics[0].ip = self.ip
4755 a8083063 Iustin Pop
      result.append(("ip", self.ip))
4756 a8083063 Iustin Pop
    if self.bridge:
4757 a8083063 Iustin Pop
      instance.nics[0].bridge = self.bridge
4758 a8083063 Iustin Pop
      result.append(("bridge", self.bridge))
4759 1862d460 Alexander Schreiber
    if self.mac:
4760 1862d460 Alexander Schreiber
      instance.nics[0].mac = self.mac
4761 1862d460 Alexander Schreiber
      result.append(("mac", self.mac))
4762 74409b12 Iustin Pop
    if self.op.hvparams:
4763 74409b12 Iustin Pop
      instance.hvparams = self.hv_new
4764 74409b12 Iustin Pop
      for key, val in self.op.hvparams.iteritems():
4765 74409b12 Iustin Pop
        result.append(("hv/%s" % key, val))
4766 338e51e8 Iustin Pop
    if self.op.beparams:
4767 338e51e8 Iustin Pop
      instance.beparams = self.be_inst
4768 338e51e8 Iustin Pop
      for key, val in self.op.beparams.iteritems():
4769 338e51e8 Iustin Pop
        result.append(("be/%s" % key, val))
4770 a8083063 Iustin Pop
4771 ea94e1cd Guido Trotter
    self.cfg.Update(instance)
4772 a8083063 Iustin Pop
4773 a8083063 Iustin Pop
    return result
4774 a8083063 Iustin Pop
4775 a8083063 Iustin Pop
4776 a8083063 Iustin Pop
class LUQueryExports(NoHooksLU):
  """Query the exports list

  """
  _OP_REQP = ['nodes']
  REQ_BGL = False

  def ExpandNames(self):
    self.needed_locks = {}
    # exports are only read, so a shared lock is enough
    self.share_locks[locking.LEVEL_NODE] = 1
    if self.op.nodes:
      self.needed_locks[locking.LEVEL_NODE] = \
        _GetWantedNodes(self, self.op.nodes)
    else:
      # an empty node list means "query every node in the cluster"
      self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET

  def CheckPrereq(self):
    """Check prerequisites.

    """
    # the nodes to query are exactly the ones locked in ExpandNames
    self.nodes = self.acquired_locks[locking.LEVEL_NODE]

  def Exec(self, feedback_fn):
    """Compute the list of all the exported system images.

    Returns:
      a dictionary with the structure node->(export-list)
      where export-list is a list of the instances exported on
      that node.

    """
    return self.rpc.call_export_list(self.nodes)
4808 a8083063 Iustin Pop
4809 a8083063 Iustin Pop
4810 a8083063 Iustin Pop
class LUExportInstance(LogicalUnit):
  """Export an instance to an image in the cluster.

  The export consists of LVM snapshots of the instance's disks, copied
  to a target node and finalized there; any older export of the same
  instance on other nodes is removed afterwards.

  """
  HPATH = "instance-export"
  HTYPE = constants.HTYPE_INSTANCE
  _OP_REQP = ["instance_name", "target_node", "shutdown"]
  REQ_BGL = False

  def ExpandNames(self):
    self._ExpandAndLockInstance()
    # FIXME: lock only instance primary and destination node
    #
    # Sad but true, for now we have to lock all nodes, as we don't know where
    # the previous export might be, and in this LU we search for it and
    # remove it from its current node. In the future we could fix this by:
    #  - making a tasklet to search (share-lock all), then create the new one,
    #    then one to remove, after
    #  - removing the removal operation altogether
    self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET

  def DeclareLocks(self, level):
    """Last minute lock declaration."""
    # All nodes are locked anyway, so nothing to do here.

  def BuildHooksEnv(self):
    """Build hooks env.

    This will run on the master, primary node and target node.

    """
    env = {
      "EXPORT_NODE": self.op.target_node,
      "EXPORT_DO_SHUTDOWN": self.op.shutdown,
      }
    env.update(_BuildInstanceHookEnvByObject(self, self.instance))
    nl = [self.cfg.GetMasterNode(), self.instance.primary_node,
          self.op.target_node]
    return env, nl, nl

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance and node names are valid.

    """
    instance_name = self.op.instance_name
    # the instance was locked in ExpandNames, so it must still exist
    self.instance = self.cfg.GetInstanceInfo(instance_name)
    assert self.instance is not None, \
          "Cannot retrieve locked instance %s" % self.op.instance_name

    self.dst_node = self.cfg.GetNodeInfo(
      self.cfg.ExpandNodeName(self.op.target_node))

    assert self.dst_node is not None, \
          "Cannot retrieve locked node %s" % self.op.target_node

    # instance disk type verification: file-based disks cannot be
    # snapshotted via LVM, which the export mechanism relies on
    for disk in self.instance.disks:
      if disk.dev_type == constants.LD_FILE:
        raise errors.OpPrereqError("Export not supported for instances with"
                                   " file-based disks")

  def Exec(self, feedback_fn):
    """Export an instance to an image in the cluster.

    The sequence is: optional shutdown, per-disk LVM snapshot,
    restart (in a finally clause, so it happens even if snapshotting
    fails), per-snapshot export+removal, finalization on the target
    node, and finally removal of any older export elsewhere.

    """
    instance = self.instance
    dst_node = self.dst_node
    src_node = instance.primary_node
    if self.op.shutdown:
      # shutdown the instance, but not the disks
      if not self.rpc.call_instance_shutdown(src_node, instance):
        raise errors.OpExecError("Could not shutdown instance %s on node %s" %
                                 (instance.name, src_node))

    vgname = self.cfg.GetVGName()

    snap_disks = []

    try:
      for disk in instance.disks:
        # new_dev_name will be a snapshot of an lvm leaf of the one we passed
        new_dev_name = self.rpc.call_blockdev_snapshot(src_node, disk)

        if not new_dev_name:
          # a False placeholder keeps snap_disks index-aligned with
          # instance.disks; the later loops skip these entries
          self.LogWarning("Could not snapshot block device %s on node %s",
                          disk.logical_id[1], src_node)
          snap_disks.append(False)
        else:
          new_dev = objects.Disk(dev_type=constants.LD_LV, size=disk.size,
                                 logical_id=(vgname, new_dev_name),
                                 physical_id=(vgname, new_dev_name),
                                 iv_name=disk.iv_name)
          snap_disks.append(new_dev)

    finally:
      # restart the instance no matter what happened above, but only if
      # we shut it down ourselves and it's configured to be running
      if self.op.shutdown and instance.status == "up":
        if not self.rpc.call_instance_start(src_node, instance, None):
          _ShutdownInstanceDisks(self, instance)
          raise errors.OpExecError("Could not start instance")

    # TODO: check for size

    cluster_name = self.cfg.GetClusterName()
    for dev in snap_disks:
      # skip the False placeholders left by failed snapshots
      if dev:
        if not self.rpc.call_snapshot_export(src_node, dev, dst_node.name,
                                             instance, cluster_name):
          self.LogWarning("Could not export block device %s from node %s to"
                          " node %s", dev.logical_id[1], src_node,
                          dst_node.name)
        # the snapshot is removed even if its export failed; export
        # errors are only warnings, not fatal
        if not self.rpc.call_blockdev_remove(src_node, dev):
          self.LogWarning("Could not remove snapshot block device %s from node"
                          " %s", dev.logical_id[1], src_node)

    if not self.rpc.call_finalize_export(dst_node.name, instance, snap_disks):
      self.LogWarning("Could not finalize export for instance %s on node %s",
                      instance.name, dst_node.name)

    nodelist = self.cfg.GetNodeList()
    nodelist.remove(dst_node.name)

    # on one-node clusters nodelist will be empty after the removal
    # if we proceed the backup would be removed because OpQueryExports
    # substitutes an empty list with the full cluster node list.
    if nodelist:
      exportlist = self.rpc.call_export_list(nodelist)
      for node in exportlist:
        if instance.name in exportlist[node]:
          if not self.rpc.call_export_remove(node, instance.name):
            self.LogWarning("Could not remove older export for instance %s"
                            " on node %s", instance.name, node)
4943 5c947f38 Iustin Pop
4944 5c947f38 Iustin Pop
4945 9ac99fda Guido Trotter
class LURemoveExport(NoHooksLU):
  """Remove exports related to the named instance.

  """
  _OP_REQP = ["instance_name"]
  REQ_BGL = False

  def ExpandNames(self):
    # We need all nodes to be locked in order for RemoveExport to work, but we
    # don't need to lock the instance itself, as nothing will happen to it (and
    # we can remove exports also for a removed instance)
    self.needed_locks = {locking.LEVEL_NODE: locking.ALL_SET}

  def CheckPrereq(self):
    """Check prerequisites.
    """
    pass

  def Exec(self, feedback_fn):
    """Remove any export.

    """
    instance_name = self.cfg.ExpandInstanceName(self.op.instance_name)
    # If the instance was not found we'll try with the name that was passed in.
    # This will only work if it was an FQDN, though.
    fqdn_warn = not instance_name
    if fqdn_warn:
      instance_name = self.op.instance_name

    locked_nodes = self.acquired_locks[locking.LEVEL_NODE]
    exportlist = self.rpc.call_export_list(locked_nodes)
    found = False
    for node in exportlist:
      if instance_name not in exportlist[node]:
        continue
      found = True
      # removal failures are logged but don't abort the loop, so that
      # exports on other nodes are still cleaned up
      if not self.rpc.call_export_remove(node, instance_name):
        logging.error("Could not remove export for instance %s"
                      " on node %s", instance_name, node)

    if fqdn_warn and not found:
      feedback_fn("Export not found. If trying to remove an export belonging"
                  " to a deleted instance please use its Fully Qualified"
                  " Domain Name.")
4990 9ac99fda Guido Trotter
4991 9ac99fda Guido Trotter
4992 5c947f38 Iustin Pop
class TagsLU(NoHooksLU):
  """Generic tags LU.

  This is an abstract class which is the parent of all the other tags LUs.

  """

  def ExpandNames(self):
    self.needed_locks = {}
    # cluster tags need no locking; node/instance tags lock their target
    if self.op.kind == constants.TAG_NODE:
      expanded_name = self.cfg.ExpandNodeName(self.op.name)
      if expanded_name is None:
        raise errors.OpPrereqError("Invalid node name (%s)" %
                                   (self.op.name,))
      self.op.name = expanded_name
      self.needed_locks[locking.LEVEL_NODE] = expanded_name
    elif self.op.kind == constants.TAG_INSTANCE:
      expanded_name = self.cfg.ExpandInstanceName(self.op.name)
      if expanded_name is None:
        raise errors.OpPrereqError("Invalid instance name (%s)" %
                                   (self.op.name,))
      self.op.name = expanded_name
      self.needed_locks[locking.LEVEL_INSTANCE] = expanded_name

  def CheckPrereq(self):
    """Check prerequisites.

    """
    # resolve the target object the tag operation will act on
    kind = self.op.kind
    if kind == constants.TAG_CLUSTER:
      self.target = self.cfg.GetClusterInfo()
    elif kind == constants.TAG_NODE:
      self.target = self.cfg.GetNodeInfo(self.op.name)
    elif kind == constants.TAG_INSTANCE:
      self.target = self.cfg.GetInstanceInfo(self.op.name)
    else:
      raise errors.OpPrereqError("Wrong tag type requested (%s)" %
                                 str(self.op.kind))
5029 5c947f38 Iustin Pop
5030 5c947f38 Iustin Pop
5031 5c947f38 Iustin Pop
class LUGetTags(TagsLU):
  """Returns the tags of a given object.

  """
  _OP_REQP = ["kind", "name"]
  REQ_BGL = False

  def Exec(self, feedback_fn):
    """Returns the tag list.

    """
    # self.target was resolved by TagsLU.CheckPrereq; return a plain list
    return [tag for tag in self.target.GetTags()]
5043 5c947f38 Iustin Pop
5044 5c947f38 Iustin Pop
5045 73415719 Iustin Pop
class LUSearchTags(NoHooksLU):
5046 73415719 Iustin Pop
  """Searches the tags for a given pattern.
5047 73415719 Iustin Pop

5048 73415719 Iustin Pop
  """
5049 73415719 Iustin Pop
  _OP_REQP = ["pattern"]
5050 8646adce Guido Trotter
  REQ_BGL = False
5051 8646adce Guido Trotter
5052 8646adce Guido Trotter
  def ExpandNames(self):
5053 8646adce Guido Trotter
    self.needed_locks = {}
5054 73415719 Iustin Pop
5055 73415719 Iustin Pop
  def CheckPrereq(self):
5056 73415719 Iustin Pop
    """Check prerequisites.
5057 73415719 Iustin Pop

5058 73415719 Iustin Pop
    This checks the pattern passed for validity by compiling it.
5059 73415719 Iustin Pop

5060 73415719 Iustin Pop
    """
5061 73415719 Iustin Pop
    try:
5062 73415719 Iustin Pop
      self.re = re.compile(self.op.pattern)
5063 73415719 Iustin Pop
    except re.error, err:
5064 73415719 Iustin Pop
      raise errors.OpPrereqError("Invalid search pattern '%s': %s" %
5065 73415719 Iustin Pop
                                 (self.op.pattern, err))
5066 73415719 Iustin Pop
5067 73415719 Iustin Pop
  def Exec(self, feedback_fn):
5068 73415719 Iustin Pop
    """Returns the tag list.
5069 73415719 Iustin Pop

5070 73415719 Iustin Pop
    """
5071 73415719 Iustin Pop
    cfg = self.cfg
5072 73415719 Iustin Pop
    tgts = [("/cluster", cfg.GetClusterInfo())]
5073 8646adce Guido Trotter
    ilist = cfg.GetAllInstancesInfo().values()
5074 73415719 Iustin Pop
    tgts.extend([("/instances/%s" % i.name, i) for i in ilist])
5075 8646adce Guido Trotter
    nlist = cfg.GetAllNodesInfo().values()
5076 73415719 Iustin Pop
    tgts.extend([("/nodes/%s" % n.name, n) for n in nlist])
5077 73415719 Iustin Pop
    results = []
5078 73415719 Iustin Pop
    for path, target in tgts:
5079 73415719 Iustin Pop
      for tag in target.GetTags():
5080 73415719 Iustin Pop
        if self.re.search(tag):
5081 73415719 Iustin Pop
          results.append((path, tag))
5082 73415719 Iustin Pop
    return results
5083 73415719 Iustin Pop
5084 73415719 Iustin Pop
5085 f27302fa Iustin Pop
class LUAddTags(TagsLU):
5086 5c947f38 Iustin Pop
  """Sets a tag on a given object.
5087 5c947f38 Iustin Pop

5088 5c947f38 Iustin Pop
  """
5089 f27302fa Iustin Pop
  _OP_REQP = ["kind", "name", "tags"]
5090 8646adce Guido Trotter
  REQ_BGL = False
5091 5c947f38 Iustin Pop
5092 5c947f38 Iustin Pop
  def CheckPrereq(self):
5093 5c947f38 Iustin Pop
    """Check prerequisites.
5094 5c947f38 Iustin Pop

5095 5c947f38 Iustin Pop
    This checks the type and length of the tag name and value.
5096 5c947f38 Iustin Pop

5097 5c947f38 Iustin Pop
    """
5098 5c947f38 Iustin Pop
    TagsLU.CheckPrereq(self)
5099 f27302fa Iustin Pop
    for tag in self.op.tags:
5100 f27302fa Iustin Pop
      objects.TaggableObject.ValidateTag(tag)
5101 5c947f38 Iustin Pop
5102 5c947f38 Iustin Pop
  def Exec(self, feedback_fn):
5103 5c947f38 Iustin Pop
    """Sets the tag.
5104 5c947f38 Iustin Pop

5105 5c947f38 Iustin Pop
    """
5106 5c947f38 Iustin Pop
    try:
5107 f27302fa Iustin Pop
      for tag in self.op.tags:
5108 f27302fa Iustin Pop
        self.target.AddTag(tag)
5109 5c947f38 Iustin Pop
    except errors.TagError, err:
5110 3ecf6786 Iustin Pop
      raise errors.OpExecError("Error while setting tag: %s" % str(err))
5111 5c947f38 Iustin Pop
    try:
5112 5c947f38 Iustin Pop
      self.cfg.Update(self.target)
5113 5c947f38 Iustin Pop
    except errors.ConfigurationError:
5114 3ecf6786 Iustin Pop
      raise errors.OpRetryError("There has been a modification to the"
5115 3ecf6786 Iustin Pop
                                " config file and the operation has been"
5116 3ecf6786 Iustin Pop
                                " aborted. Please retry.")
5117 5c947f38 Iustin Pop
5118 5c947f38 Iustin Pop
5119 f27302fa Iustin Pop
class LUDelTags(TagsLU):
  """Delete a list of tags from a given object.

  """
  _OP_REQP = ["kind", "name", "tags"]
  REQ_BGL = False

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that we have the given tag.

    """
    TagsLU.CheckPrereq(self)
    for tag in self.op.tags:
      objects.TaggableObject.ValidateTag(tag)
    # every tag to be removed must currently be present on the target
    missing = frozenset(self.op.tags) - self.target.GetTags()
    if missing:
      missing_names = sorted(["'%s'" % tag for tag in missing])
      raise errors.OpPrereqError("Tag(s) %s not found" %
                                 (",".join(missing_names)))

  def Exec(self, feedback_fn):
    """Remove the tag from the object.

    """
    for tag in self.op.tags:
      self.target.RemoveTag(tag)
    # persist the modified target; a concurrent config change aborts the
    # operation and asks the user to retry
    try:
      self.cfg.Update(self.target)
    except errors.ConfigurationError:
      raise errors.OpRetryError("There has been a modification to the"
                                " config file and the operation has been"
                                " aborted. Please retry.")
5156 06009e27 Iustin Pop
5157 0eed6e61 Guido Trotter
5158 06009e27 Iustin Pop
class LUTestDelay(NoHooksLU):
  """Sleep for a specified amount of time.

  This LU sleeps on the master and/or nodes for a specified amount of
  time.

  """
  _OP_REQP = ["duration", "on_master", "on_nodes"]
  REQ_BGL = False

  def ExpandNames(self):
    """Expand names and set required locks.

    This expands the node list, if any.

    """
    self.needed_locks = {}
    if not self.op.on_nodes:
      return
    # _GetWantedNodes can be used here, but is not always appropriate to use
    # this way in ExpandNames. Check LogicalUnit.ExpandNames docstring for
    # more information.
    self.op.on_nodes = _GetWantedNodes(self, self.op.on_nodes)
    self.needed_locks[locking.LEVEL_NODE] = self.op.on_nodes

  def CheckPrereq(self):
    """Check prerequisites.

    """

  def Exec(self, feedback_fn):
    """Do the actual sleep.

    """
    # master-side delay first, then the per-node rpc delays
    if self.op.on_master and not utils.TestDelay(self.op.duration):
      raise errors.OpExecError("Error during master delay test")
    if self.op.on_nodes:
      results = self.rpc.call_test_delay(self.op.on_nodes, self.op.duration)
      if not results:
        raise errors.OpExecError("Complete failure from rpc call")
      for node, node_ok in results.items():
        if not node_ok:
          raise errors.OpExecError("Failure during rpc call to node %s,"
                                   " result: %s" % (node, node_ok))
5202 d61df03e Iustin Pop
5203 d61df03e Iustin Pop
5204 d1c2dd75 Iustin Pop
class IAllocator(object):
5205 d1c2dd75 Iustin Pop
  """IAllocator framework.
5206 d61df03e Iustin Pop

5207 d1c2dd75 Iustin Pop
  An IAllocator instance has three sets of attributes:
5208 d6a02168 Michael Hanselmann
    - cfg that is needed to query the cluster
5209 d1c2dd75 Iustin Pop
    - input data (all members of the _KEYS class attribute are required)
5210 d1c2dd75 Iustin Pop
    - four buffer attributes (in|out_data|text), that represent the
5211 d1c2dd75 Iustin Pop
      input (to the external script) in text and data structure format,
5212 d1c2dd75 Iustin Pop
      and the output from it, again in two formats
5213 d1c2dd75 Iustin Pop
    - the result variables from the script (success, info, nodes) for
5214 d1c2dd75 Iustin Pop
      easy usage
5215 d61df03e Iustin Pop

5216 d61df03e Iustin Pop
  """
5217 29859cb7 Iustin Pop
  _ALLO_KEYS = [
5218 d1c2dd75 Iustin Pop
    "mem_size", "disks", "disk_template",
5219 d1c2dd75 Iustin Pop
    "os", "tags", "nics", "vcpus",
5220 d1c2dd75 Iustin Pop
    ]
5221 29859cb7 Iustin Pop
  _RELO_KEYS = [
5222 29859cb7 Iustin Pop
    "relocate_from",
5223 29859cb7 Iustin Pop
    ]
5224 d1c2dd75 Iustin Pop
5225 72737a7f Iustin Pop
  def __init__(self, lu, mode, name, **kwargs):
    self.lu = lu
    # buffers for the textual/structured input and output of the script
    self.in_text = self.out_text = self.in_data = self.out_data = None
    # pre-initialize all input fields so that pylint is happy
    self.mode = mode
    self.name = name
    self.mem_size = self.disks = self.disk_template = None
    self.os = self.tags = self.nics = self.vcpus = None
    self.relocate_from = None
    # computed fields
    self.required_nodes = None
    # result fields, filled in after the script has run
    self.success = self.info = self.nodes = None
    # the mode determines which keyword arguments are mandatory
    if self.mode == constants.IALLOCATOR_MODE_ALLOC:
      keyset = self._ALLO_KEYS
    elif self.mode == constants.IALLOCATOR_MODE_RELOC:
      keyset = self._RELO_KEYS
    else:
      raise errors.ProgrammerError("Unknown mode '%s' passed to the"
                                   " IAllocator" % self.mode)
    # every passed argument must be a known one...
    for key, value in kwargs.items():
      if key not in keyset:
        raise errors.ProgrammerError("Invalid input parameter '%s' to"
                                     " IAllocator" % key)
      setattr(self, key, value)
    # ...and every known argument must have been passed
    for key in keyset:
      if key not in kwargs:
        raise errors.ProgrammerError("Missing input parameter '%s' to"
                                     " IAllocator" % key)
    self._BuildInputData()
5256 d1c2dd75 Iustin Pop
5257 d1c2dd75 Iustin Pop
  def _ComputeClusterData(self):
5258 d1c2dd75 Iustin Pop
    """Compute the generic allocator input data.
5259 d1c2dd75 Iustin Pop

5260 d1c2dd75 Iustin Pop
    This is the data that is independent of the actual operation.
5261 d1c2dd75 Iustin Pop

5262 d1c2dd75 Iustin Pop
    """
5263 72737a7f Iustin Pop
    cfg = self.lu.cfg
5264 e69d05fd Iustin Pop
    cluster_info = cfg.GetClusterInfo()
5265 d1c2dd75 Iustin Pop
    # cluster data
5266 d1c2dd75 Iustin Pop
    data = {
5267 d1c2dd75 Iustin Pop
      "version": 1,
5268 72737a7f Iustin Pop
      "cluster_name": cfg.GetClusterName(),
5269 e69d05fd Iustin Pop
      "cluster_tags": list(cluster_info.GetTags()),
5270 e69d05fd Iustin Pop
      "enable_hypervisors": list(cluster_info.enabled_hypervisors),
5271 d1c2dd75 Iustin Pop
      # we don't have job IDs
5272 d61df03e Iustin Pop
      }
5273 d61df03e Iustin Pop
5274 338e51e8 Iustin Pop
    i_list = []
5275 338e51e8 Iustin Pop
    cluster = self.cfg.GetClusterInfo()
5276 338e51e8 Iustin Pop
    for iname in cfg.GetInstanceList():
5277 338e51e8 Iustin Pop
      i_obj = cfg.GetInstanceInfo(iname)
5278 338e51e8 Iustin Pop
      i_list.append((i_obj, cluster.FillBE(i_obj)))
5279 6286519f Iustin Pop
5280 d1c2dd75 Iustin Pop
    # node data
5281 d1c2dd75 Iustin Pop
    node_results = {}
5282 d1c2dd75 Iustin Pop
    node_list = cfg.GetNodeList()
5283 e69d05fd Iustin Pop
    # FIXME: here we have only one hypervisor information, but
5284 e69d05fd Iustin Pop
    # instance can belong to different hypervisors
5285 72737a7f Iustin Pop
    node_data = self.lu.rpc.call_node_info(node_list, cfg.GetVGName(),
5286 72737a7f Iustin Pop
                                           cfg.GetHypervisorType())
5287 d1c2dd75 Iustin Pop
    for nname in node_list:
5288 d1c2dd75 Iustin Pop
      ninfo = cfg.GetNodeInfo(nname)
5289 d1c2dd75 Iustin Pop
      if nname not in node_data or not isinstance(node_data[nname], dict):
5290 d1c2dd75 Iustin Pop
        raise errors.OpExecError("Can't get data for node %s" % nname)
5291 d1c2dd75 Iustin Pop
      remote_info = node_data[nname]
5292 b2662e7f Iustin Pop
      for attr in ['memory_total', 'memory_free', 'memory_dom0',
5293 4337cf1b Iustin Pop
                   'vg_size', 'vg_free', 'cpu_total']:
5294 d1c2dd75 Iustin Pop
        if attr not in remote_info:
5295 d1c2dd75 Iustin Pop
          raise errors.OpExecError("Node '%s' didn't return attribute '%s'" %
5296 d1c2dd75 Iustin Pop
                                   (nname, attr))
5297 d1c2dd75 Iustin Pop
        try:
5298 b2662e7f Iustin Pop
          remote_info[attr] = int(remote_info[attr])
5299 d1c2dd75 Iustin Pop
        except ValueError, err:
5300 d1c2dd75 Iustin Pop
          raise errors.OpExecError("Node '%s' returned invalid value for '%s':"
5301 d1c2dd75 Iustin Pop
                                   " %s" % (nname, attr, str(err)))
5302 6286519f Iustin Pop
      # compute memory used by primary instances
5303 6286519f Iustin Pop
      i_p_mem = i_p_up_mem = 0
5304 338e51e8 Iustin Pop
      for iinfo, beinfo in i_list:
5305 6286519f Iustin Pop
        if iinfo.primary_node == nname:
5306 338e51e8 Iustin Pop
          i_p_mem += beinfo[constants.BE_MEMORY]
5307 6286519f Iustin Pop
          if iinfo.status == "up":
5308 338e51e8 Iustin Pop
            i_p_up_mem += beinfo[constants.BE_MEMORY]
5309 6286519f Iustin Pop
5310 b2662e7f Iustin Pop
      # compute memory used by instances
5311 d1c2dd75 Iustin Pop
      pnr = {
5312 d1c2dd75 Iustin Pop
        "tags": list(ninfo.GetTags()),
5313 b2662e7f Iustin Pop
        "total_memory": remote_info['memory_total'],
5314 b2662e7f Iustin Pop
        "reserved_memory": remote_info['memory_dom0'],
5315 b2662e7f Iustin Pop
        "free_memory": remote_info['memory_free'],
5316 6286519f Iustin Pop
        "i_pri_memory": i_p_mem,
5317 6286519f Iustin Pop
        "i_pri_up_memory": i_p_up_mem,
5318 b2662e7f Iustin Pop
        "total_disk": remote_info['vg_size'],
5319 b2662e7f Iustin Pop
        "free_disk": remote_info['vg_free'],
5320 d1c2dd75 Iustin Pop
        "primary_ip": ninfo.primary_ip,
5321 d1c2dd75 Iustin Pop
        "secondary_ip": ninfo.secondary_ip,
5322 4337cf1b Iustin Pop
        "total_cpus": remote_info['cpu_total'],
5323 d1c2dd75 Iustin Pop
        }
5324 d1c2dd75 Iustin Pop
      node_results[nname] = pnr
5325 d1c2dd75 Iustin Pop
    data["nodes"] = node_results
5326 d1c2dd75 Iustin Pop
5327 d1c2dd75 Iustin Pop
    # instance data
5328 d1c2dd75 Iustin Pop
    instance_data = {}
5329 338e51e8 Iustin Pop
    for iinfo, beinfo in i_list:
5330 d1c2dd75 Iustin Pop
      nic_data = [{"mac": n.mac, "ip": n.ip, "bridge": n.bridge}
5331 d1c2dd75 Iustin Pop
                  for n in iinfo.nics]
5332 d1c2dd75 Iustin Pop
      pir = {
5333 d1c2dd75 Iustin Pop
        "tags": list(iinfo.GetTags()),
5334 d1c2dd75 Iustin Pop
        "should_run": iinfo.status == "up",
5335 338e51e8 Iustin Pop
        "vcpus": beinfo[constants.BE_VCPUS],
5336 338e51e8 Iustin Pop
        "memory": beinfo[constants.BE_MEMORY],
5337 d1c2dd75 Iustin Pop
        "os": iinfo.os,
5338 d1c2dd75 Iustin Pop
        "nodes": [iinfo.primary_node] + list(iinfo.secondary_nodes),
5339 d1c2dd75 Iustin Pop
        "nics": nic_data,
5340 d1c2dd75 Iustin Pop
        "disks": [{"size": dsk.size, "mode": "w"} for dsk in iinfo.disks],
5341 d1c2dd75 Iustin Pop
        "disk_template": iinfo.disk_template,
5342 e69d05fd Iustin Pop
        "hypervisor": iinfo.hypervisor,
5343 d1c2dd75 Iustin Pop
        }
5344 768f0a80 Iustin Pop
      instance_data[iinfo.name] = pir
5345 d61df03e Iustin Pop
5346 d1c2dd75 Iustin Pop
    data["instances"] = instance_data
5347 d61df03e Iustin Pop
5348 d1c2dd75 Iustin Pop
    self.in_data = data
5349 d61df03e Iustin Pop
5350 d1c2dd75 Iustin Pop
  def _AddNewInstance(self):
    """Fill in the allocator request for a new-instance allocation.

    Together with _ComputeClusterData this builds the complete input
    structure needed by the allocator. The opcode completeness checks
    must have already been performed by the caller.

    """
    if len(self.disks) != 2:
      raise errors.OpExecError("Only two-disk configurations supported")

    # network-mirrored templates need a secondary node as well
    if self.disk_template in constants.DTS_NET_MIRROR:
      self.required_nodes = 2
    else:
      self.required_nodes = 1

    total_disk = _ComputeDiskSize(self.disk_template,
                                  self.disks[0]["size"],
                                  self.disks[1]["size"])

    self.in_data["request"] = {
      "type": "allocate",
      "name": self.name,
      "disk_template": self.disk_template,
      "tags": self.tags,
      "os": self.os,
      "vcpus": self.vcpus,
      "memory": self.mem_size,
      "disks": self.disks,
      "disk_space_total": total_disk,
      "nics": self.nics,
      "required_nodes": self.required_nodes,
      }
5385 298fe380 Iustin Pop
5386 d1c2dd75 Iustin Pop
  def _AddRelocateInstance(self):
    """Add relocate instance data to allocator structure.

    This in combination with _ComputeClusterData will create the
    correct structure needed as input for the allocator.

    The checks for the completeness of the opcode must have already been
    done.

    """
    instance = self.lu.cfg.GetInstanceInfo(self.name)
    if instance is None:
      raise errors.ProgrammerError("Unknown instance '%s' passed to"
                                   " IAllocator" % self.name)

    if instance.disk_template not in constants.DTS_NET_MIRROR:
      raise errors.OpPrereqError("Can't relocate non-mirrored instances")

    if len(instance.secondary_nodes) != 1:
      raise errors.OpPrereqError("Instance has not exactly one secondary node")

    # the disk size computation below indexes disks[0] and disks[1]
    # directly and would otherwise die with an IndexError; this mirrors
    # the same restriction enforced in _AddNewInstance
    if len(instance.disks) != 2:
      raise errors.OpPrereqError("Only two-disk configurations supported")

    self.required_nodes = 1

    disk_space = _ComputeDiskSize(instance.disk_template,
                                  instance.disks[0].size,
                                  instance.disks[1].size)

    request = {
      "type": "relocate",
      "name": self.name,
      "disk_space_total": disk_space,
      "required_nodes": self.required_nodes,
      "relocate_from": self.relocate_from,
      }
    self.in_data["request"] = request
5421 d61df03e Iustin Pop
5422 d1c2dd75 Iustin Pop
  def _BuildInputData(self):
    """Assemble the full allocator input and serialize it.

    This gathers the cluster-wide data first, then appends the
    mode-specific "request" section, and finally stores the serialized
    text in self.in_text.

    """
    self._ComputeClusterData()

    if self.mode != constants.IALLOCATOR_MODE_ALLOC:
      self._AddRelocateInstance()
    else:
      self._AddNewInstance()

    self.in_text = serializer.Dump(self.in_data)
5434 d61df03e Iustin Pop
5435 72737a7f Iustin Pop
  def Run(self, name, validate=True, call_fn=None):
    """Run an instance allocator and store its results.

    The raw allocator output is stored in self.out_text; when validate
    is True it is also parsed and checked via _ValidateResult (which
    fills self.out_data).

    The call_fn argument allows overriding the RPC call (e.g. for
    tests); by default the master node's iallocator runner is invoked.

    """
    if call_fn is None:
      call_fn = self.lu.rpc.call_iallocator_runner

    result = call_fn(self.lu.cfg.GetMasterNode(), name, self.in_text)

    # the runner must return a 4-tuple: (exit code, stdout, stderr, fail)
    if not isinstance(result, (list, tuple)) or len(result) != 4:
      raise errors.OpExecError("Invalid result from master iallocator runner")

    rcode, stdout, stderr, fail = result

    if rcode == constants.IARUN_NOTFOUND:
      raise errors.OpExecError("Can't find allocator '%s'" % name)
    elif rcode == constants.IARUN_FAILURE:
      raise errors.OpExecError("Instance allocator call failed: %s,"
                               " output: %s" % (fail, stdout+stderr))
    self.out_text = stdout
    if validate:
      self._ValidateResult()
5458 298fe380 Iustin Pop
5459 d1c2dd75 Iustin Pop
  def _ValidateResult(self):
5460 d1c2dd75 Iustin Pop
    """Process the allocator results.
5461 538475ca Iustin Pop

5462 d1c2dd75 Iustin Pop
    This will process and if successful save the result in
5463 d1c2dd75 Iustin Pop
    self.out_data and the other parameters.
5464 538475ca Iustin Pop

5465 d1c2dd75 Iustin Pop
    """
5466 d1c2dd75 Iustin Pop
    try:
5467 d1c2dd75 Iustin Pop
      rdict = serializer.Load(self.out_text)
5468 d1c2dd75 Iustin Pop
    except Exception, err:
5469 d1c2dd75 Iustin Pop
      raise errors.OpExecError("Can't parse iallocator results: %s" % str(err))
5470 d1c2dd75 Iustin Pop
5471 d1c2dd75 Iustin Pop
    if not isinstance(rdict, dict):
5472 d1c2dd75 Iustin Pop
      raise errors.OpExecError("Can't parse iallocator results: not a dict")
5473 538475ca Iustin Pop
5474 d1c2dd75 Iustin Pop
    for key in "success", "info", "nodes":
5475 d1c2dd75 Iustin Pop
      if key not in rdict:
5476 d1c2dd75 Iustin Pop
        raise errors.OpExecError("Can't parse iallocator results:"
5477 d1c2dd75 Iustin Pop
                                 " missing key '%s'" % key)
5478 d1c2dd75 Iustin Pop
      setattr(self, key, rdict[key])
5479 538475ca Iustin Pop
5480 d1c2dd75 Iustin Pop
    if not isinstance(rdict["nodes"], list):
5481 d1c2dd75 Iustin Pop
      raise errors.OpExecError("Can't parse iallocator results: 'nodes' key"
5482 d1c2dd75 Iustin Pop
                               " is not a list")
5483 d1c2dd75 Iustin Pop
    self.out_data = rdict
5484 538475ca Iustin Pop
5485 538475ca Iustin Pop
5486 d61df03e Iustin Pop
class LUTestAllocator(NoHooksLU):
  """Run allocator tests.

  This LU runs the allocator tests: depending on the opcode's
  "direction" it either only builds the allocator input text, or also
  runs a named allocator script against it and returns the raw output.

  """
  _OP_REQP = ["direction", "mode", "name"]

  def CheckPrereq(self):
    """Check prerequisites.

    This checks the opcode parameters depending on the direction and
    mode of the test.

    """
    if self.op.mode == constants.IALLOCATOR_MODE_ALLOC:
      # allocation mode: all instance-creation parameters must be present
      for attr in ["name", "mem_size", "disks", "disk_template",
                   "os", "tags", "nics", "vcpus"]:
        if not hasattr(self.op, attr):
          raise errors.OpPrereqError("Missing attribute '%s' on opcode input" %
                                     attr)
      # the instance to be "created" must not already exist
      iname = self.cfg.ExpandInstanceName(self.op.name)
      if iname is not None:
        raise errors.OpPrereqError("Instance '%s' already in the cluster" %
                                   iname)
      if not isinstance(self.op.nics, list):
        raise errors.OpPrereqError("Invalid parameter 'nics'")
      # each NIC must be a dict carrying at least mac, ip and bridge
      for row in self.op.nics:
        if (not isinstance(row, dict) or
            "mac" not in row or
            "ip" not in row or
            "bridge" not in row):
          raise errors.OpPrereqError("Invalid contents of the"
                                     " 'nics' parameter")
      if not isinstance(self.op.disks, list):
        raise errors.OpPrereqError("Invalid parameter 'disks'")
      # only two-disk configurations are supported by the allocator input
      if len(self.op.disks) != 2:
        raise errors.OpPrereqError("Only two-disk configurations supported")
      # each disk must be a dict with an integer size and an 'r'/'w' mode
      for row in self.op.disks:
        if (not isinstance(row, dict) or
            "size" not in row or
            not isinstance(row["size"], int) or
            "mode" not in row or
            row["mode"] not in ['r', 'w']):
          raise errors.OpPrereqError("Invalid contents of the"
                                     " 'disks' parameter")
    elif self.op.mode == constants.IALLOCATOR_MODE_RELOC:
      # relocation mode: only the name of an existing instance is needed
      if not hasattr(self.op, "name"):
        raise errors.OpPrereqError("Missing attribute 'name' on opcode input")
      fname = self.cfg.ExpandInstanceName(self.op.name)
      if fname is None:
        raise errors.OpPrereqError("Instance '%s' not found for relocation" %
                                   self.op.name)
      self.op.name = fname
      # the instance's current secondaries become the request's
      # 'relocate_from' list (used by Exec below)
      self.relocate_from = self.cfg.GetInstanceInfo(fname).secondary_nodes
    else:
      raise errors.OpPrereqError("Invalid test allocator mode '%s'" %
                                 self.op.mode)

    # direction 'out' actually runs an allocator, so its name is required
    if self.op.direction == constants.IALLOCATOR_DIR_OUT:
      if not hasattr(self.op, "allocator") or self.op.allocator is None:
        raise errors.OpPrereqError("Missing allocator name")
    elif self.op.direction != constants.IALLOCATOR_DIR_IN:
      raise errors.OpPrereqError("Wrong allocator test '%s'" %
                                 self.op.direction)

  def Exec(self, feedback_fn):
    """Run the allocator test.

    Returns either the generated allocator input text (direction 'in')
    or the raw, unvalidated output of the allocator run (direction
    'out').

    """
    if self.op.mode == constants.IALLOCATOR_MODE_ALLOC:
      ial = IAllocator(self,
                       mode=self.op.mode,
                       name=self.op.name,
                       mem_size=self.op.mem_size,
                       disks=self.op.disks,
                       disk_template=self.op.disk_template,
                       os=self.op.os,
                       tags=self.op.tags,
                       nics=self.op.nics,
                       vcpus=self.op.vcpus,
                       )
    else:
      ial = IAllocator(self,
                       mode=self.op.mode,
                       name=self.op.name,
                       relocate_from=list(self.relocate_from),
                       )

    if self.op.direction == constants.IALLOCATOR_DIR_IN:
      # only build and return the input data, don't run the allocator
      result = ial.in_text
    else:
      # run without validation so the raw output can be inspected
      ial.Run(self.op.allocator, validate=False)
      result = ial.out_text
    return result
    return result