Statistics
| Branch: | Tag: | Revision:

root / lib / cmdlib.py @ 23828f1c

History | View | Annotate | Download (189.7 kB)

1 2f31098c Iustin Pop
#
2 a8083063 Iustin Pop
#
3 a8083063 Iustin Pop
4 e7c6e02b Michael Hanselmann
# Copyright (C) 2006, 2007, 2008 Google Inc.
5 a8083063 Iustin Pop
#
6 a8083063 Iustin Pop
# This program is free software; you can redistribute it and/or modify
7 a8083063 Iustin Pop
# it under the terms of the GNU General Public License as published by
8 a8083063 Iustin Pop
# the Free Software Foundation; either version 2 of the License, or
9 a8083063 Iustin Pop
# (at your option) any later version.
10 a8083063 Iustin Pop
#
11 a8083063 Iustin Pop
# This program is distributed in the hope that it will be useful, but
12 a8083063 Iustin Pop
# WITHOUT ANY WARRANTY; without even the implied warranty of
13 a8083063 Iustin Pop
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
14 a8083063 Iustin Pop
# General Public License for more details.
15 a8083063 Iustin Pop
#
16 a8083063 Iustin Pop
# You should have received a copy of the GNU General Public License
17 a8083063 Iustin Pop
# along with this program; if not, write to the Free Software
18 a8083063 Iustin Pop
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
19 a8083063 Iustin Pop
# 02110-1301, USA.
20 a8083063 Iustin Pop
21 a8083063 Iustin Pop
22 880478f8 Iustin Pop
"""Module implementing the master-side code."""
23 a8083063 Iustin Pop
24 a8083063 Iustin Pop
# pylint: disable-msg=W0613,W0201
25 a8083063 Iustin Pop
26 a8083063 Iustin Pop
import os
27 a8083063 Iustin Pop
import os.path
28 a8083063 Iustin Pop
import sha
29 a8083063 Iustin Pop
import time
30 a8083063 Iustin Pop
import tempfile
31 a8083063 Iustin Pop
import re
32 a8083063 Iustin Pop
import platform
33 ffa1c0dc Iustin Pop
import logging
34 74409b12 Iustin Pop
import copy
35 a8083063 Iustin Pop
36 a8083063 Iustin Pop
from ganeti import ssh
37 a8083063 Iustin Pop
from ganeti import logger
38 a8083063 Iustin Pop
from ganeti import utils
39 a8083063 Iustin Pop
from ganeti import errors
40 a8083063 Iustin Pop
from ganeti import hypervisor
41 6048c986 Guido Trotter
from ganeti import locking
42 a8083063 Iustin Pop
from ganeti import constants
43 a8083063 Iustin Pop
from ganeti import objects
44 a8083063 Iustin Pop
from ganeti import opcodes
45 8d14b30d Iustin Pop
from ganeti import serializer
46 d61df03e Iustin Pop
47 d61df03e Iustin Pop
48 a8083063 Iustin Pop
class LogicalUnit(object):
  """Logical Unit base class.

  Subclasses must follow these rules:
    - implement ExpandNames
    - implement CheckPrereq
    - implement Exec
    - implement BuildHooksEnv
    - redefine HPATH and HTYPE
    - optionally redefine their run requirements:
        REQ_MASTER: the LU needs to run on the master node
        REQ_BGL: the LU needs to hold the Big Ganeti Lock exclusively

  Note that all commands require root permissions.

  """
  # Hooks path and type; LUs that run hooks must redefine these
  HPATH = None
  HTYPE = None
  # Names of opcode attributes that must be set (non-None) on the opcode
  _OP_REQP = []
  # Whether the LU may only run on the master node
  REQ_MASTER = True
  # Whether the LU needs to hold the Big Ganeti Lock exclusively
  REQ_BGL = True

  def __init__(self, processor, op, context, rpc):
    """Constructor for LogicalUnit.

    This needs to be overriden in derived classes in order to check op
    validity.

    Raises errors.OpPrereqError if a parameter listed in _OP_REQP is
    missing from the opcode, if the cluster is not initialized, or (for
    REQ_MASTER LUs) if we are not running on the master node.

    """
    self.proc = processor
    self.op = op
    self.cfg = context.cfg
    self.context = context
    self.rpc = rpc
    # Dicts used to declare locking needs to mcpu
    self.needed_locks = None
    self.acquired_locks = {}
    # By default locks at every level are exclusive (0 == not shared)
    self.share_locks = dict(((i, 0) for i in locking.LEVELS))
    self.add_locks = {}
    self.remove_locks = {}
    # Used to force good behavior when calling helper functions
    self.recalculate_locks = {}
    # Lazily-built SshRunner; see the `ssh` property below
    self.__ssh = None

    for attr_name in self._OP_REQP:
      attr_val = getattr(op, attr_name, None)
      if attr_val is None:
        raise errors.OpPrereqError("Required parameter '%s' missing" %
                                   attr_name)

    if not self.cfg.IsCluster():
      raise errors.OpPrereqError("Cluster not initialized yet,"
                                 " use 'gnt-cluster init' first.")
    if self.REQ_MASTER:
      master = self.cfg.GetMasterNode()
      if master != utils.HostInfo().name:
        raise errors.OpPrereqError("Commands must be run on the master"
                                   " node %s" % master)

  def __GetSSH(self):
    """Returns the SshRunner object, creating it on first use.

    """
    if not self.__ssh:
      self.__ssh = ssh.SshRunner(self.cfg.GetClusterName())
    return self.__ssh

  ssh = property(fget=__GetSSH)

  def ExpandNames(self):
    """Expand names for this LU.

    This method is called before starting to execute the opcode, and it should
    update all the parameters of the opcode to their canonical form (e.g. a
    short node name must be fully expanded after this method has successfully
    completed). This way locking, hooks, logging, etc. can work correctly.

    LUs which implement this method must also populate the self.needed_locks
    member, as a dict with lock levels as keys, and a list of needed lock names
    as values. Rules:
      - Use an empty dict if you don't need any lock
      - If you don't need any lock at a particular level omit that level
      - Don't put anything for the BGL level
      - If you want all locks at a level use locking.ALL_SET as a value

    If you need to share locks (rather than acquire them exclusively) at one
    level you can modify self.share_locks, setting a true value (usually 1) for
    that level. By default locks are not shared.

    Examples:
    # Acquire all nodes and one instance
    self.needed_locks = {
      locking.LEVEL_NODE: locking.ALL_SET,
      locking.LEVEL_INSTANCE: ['instance1.example.tld'],
    }
    # Acquire just two nodes
    self.needed_locks = {
      locking.LEVEL_NODE: ['node1.example.tld', 'node2.example.tld'],
    }
    # Acquire no locks
    self.needed_locks = {} # No, you can't leave it to the default value None

    """
    # The implementation of this method is mandatory only if the new LU is
    # concurrent, so that old LUs don't need to be changed all at the same
    # time.
    if self.REQ_BGL:
      self.needed_locks = {} # Exclusive LUs don't need locks.
    else:
      raise NotImplementedError

  def DeclareLocks(self, level):
    """Declare LU locking needs for a level

    While most LUs can just declare their locking needs at ExpandNames time,
    sometimes there's the need to calculate some locks after having acquired
    the ones before. This function is called just before acquiring locks at a
    particular level, but after acquiring the ones at lower levels, and permits
    such calculations. It can be used to modify self.needed_locks, and by
    default it does nothing.

    This function is only called if you have something already set in
    self.needed_locks for the level.

    @param level: Locking level which is going to be locked
    @type level: member of ganeti.locking.LEVELS

    """

  def CheckPrereq(self):
    """Check prerequisites for this LU.

    This method should check that the prerequisites for the execution
    of this LU are fulfilled. It can do internode communication, but
    it should be idempotent - no cluster or system changes are
    allowed.

    The method should raise errors.OpPrereqError in case something is
    not fulfilled. Its return value is ignored.

    This method should also update all the parameters of the opcode to
    their canonical form if it hasn't been done by ExpandNames before.

    """
    raise NotImplementedError

  def Exec(self, feedback_fn):
    """Execute the LU.

    This method should implement the actual work. It should raise
    errors.OpExecError for failures that are somewhat dealt with in
    code, or expected.

    """
    raise NotImplementedError

  def BuildHooksEnv(self):
    """Build hooks environment for this LU.

    This method should return a three-node tuple consisting of: a dict
    containing the environment that will be used for running the
    specific hook for this LU, a list of node names on which the hook
    should run before the execution, and a list of node names on which
    the hook should run after the execution.

    The keys of the dict must not have 'GANETI_' prefixed as this will
    be handled in the hooks runner. Also note additional keys will be
    added by the hooks runner. If the LU doesn't define any
    environment, an empty dict (and not None) should be returned.

    No nodes should be returned as an empty list (and not None).

    Note that if the HPATH for a LU class is None, this function will
    not be called.

    """
    raise NotImplementedError

  def HooksCallBack(self, phase, hook_results, feedback_fn, lu_result):
    """Notify the LU about the results of its hooks.

    This method is called every time a hooks phase is executed, and notifies
    the Logical Unit about the hooks' result. The LU can then use it to alter
    its result based on the hooks.  By default the method does nothing and the
    previous result is passed back unchanged but any LU can define it if it
    wants to use the local cluster hook-scripts somehow.

    Args:
      phase: the hooks phase that has just been run
      hook_results: the results of the multi-node hooks rpc call
      feedback_fn: function to send feedback back to the caller
      lu_result: the previous result this LU had, or None in the PRE phase.

    """
    return lu_result

  def _ExpandAndLockInstance(self):
    """Helper function to expand and lock an instance.

    Many LUs that work on an instance take its name in self.op.instance_name
    and need to expand it and then declare the expanded name for locking. This
    function does it, and then updates self.op.instance_name to the expanded
    name. It also initializes needed_locks as a dict, if this hasn't been done
    before.

    Raises errors.OpPrereqError if the instance name does not expand to a
    known instance.

    """
    if self.needed_locks is None:
      self.needed_locks = {}
    else:
      assert locking.LEVEL_INSTANCE not in self.needed_locks, \
        "_ExpandAndLockInstance called with instance-level locks set"
    expanded_name = self.cfg.ExpandInstanceName(self.op.instance_name)
    if expanded_name is None:
      raise errors.OpPrereqError("Instance '%s' not known" %
                                  self.op.instance_name)
    self.needed_locks[locking.LEVEL_INSTANCE] = expanded_name
    self.op.instance_name = expanded_name

  def _LockInstancesNodes(self, primary_only=False):
    """Helper function to declare instances' nodes for locking.

    This function should be called after locking one or more instances to lock
    their nodes. Its effect is populating self.needed_locks[locking.LEVEL_NODE]
    with all primary or secondary nodes for instances already locked and
    present in self.needed_locks[locking.LEVEL_INSTANCE].

    It should be called from DeclareLocks, and for safety only works if
    self.recalculate_locks[locking.LEVEL_NODE] is set.

    In the future it may grow parameters to just lock some instance's nodes, or
    to just lock primaries or secondary nodes, if needed.

    If should be called in DeclareLocks in a way similar to:

    if level == locking.LEVEL_NODE:
      self._LockInstancesNodes()

    @type primary_only: boolean
    @param primary_only: only lock primary nodes of locked instances

    """
    assert locking.LEVEL_NODE in self.recalculate_locks, \
      "_LockInstancesNodes helper function called with no nodes to recalculate"

    # TODO: check if we're really been called with the instance locks held

    # For now we'll replace self.needed_locks[locking.LEVEL_NODE], but in the
    # future we might want to have different behaviors depending on the value
    # of self.recalculate_locks[locking.LEVEL_NODE]
    wanted_nodes = []
    for instance_name in self.acquired_locks[locking.LEVEL_INSTANCE]:
      instance = self.context.cfg.GetInstanceInfo(instance_name)
      wanted_nodes.append(instance.primary_node)
      if not primary_only:
        wanted_nodes.extend(instance.secondary_nodes)

    if self.recalculate_locks[locking.LEVEL_NODE] == constants.LOCKS_REPLACE:
      self.needed_locks[locking.LEVEL_NODE] = wanted_nodes
    elif self.recalculate_locks[locking.LEVEL_NODE] == constants.LOCKS_APPEND:
      self.needed_locks[locking.LEVEL_NODE].extend(wanted_nodes)

    # Consume the recalculation request so a second call without a fresh
    # declaration trips the assert above
    del self.recalculate_locks[locking.LEVEL_NODE]
310 c4a2fee1 Guido Trotter
311 a8083063 Iustin Pop
312 a8083063 Iustin Pop
class NoHooksLU(LogicalUnit):
  """Simple LU which runs no hooks.

  This LU is intended as a parent for other LogicalUnits which will
  run no hooks, in order to reduce duplicate code.

  """
  # With HPATH None the hooks runner is never invoked for this LU
  HPATH = None
  HTYPE = None
321 a8083063 Iustin Pop
322 a8083063 Iustin Pop
323 dcb93971 Michael Hanselmann
def _GetWantedNodes(lu, nodes):
  """Returns list of checked and expanded node names.

  Args:
    lu: the LogicalUnit on whose behalf we expand (used for cfg access)
    nodes: non-empty list of node names (strings) to expand; passing an
      empty list is a programmer error, callers must expand "all nodes"
      themselves

  """
  if not isinstance(nodes, list):
    raise errors.OpPrereqError("Invalid argument type 'nodes'")

  if not nodes:
    raise errors.ProgrammerError("_GetWantedNodes should only be called with a"
      " non-empty list of nodes whose name is to be expanded.")

  wanted = []
  for name in nodes:
    # Expand possibly-short names to full node names, one by one so the
    # error message can point at the offending input
    expanded = lu.cfg.ExpandNodeName(name)
    if expanded is None:
      raise errors.OpPrereqError("No such node name '%s'" % name)
    wanted.append(expanded)

  return utils.NiceSort(wanted)
345 3312b702 Iustin Pop
346 3312b702 Iustin Pop
347 3312b702 Iustin Pop
def _GetWantedInstances(lu, instances):
  """Returns list of checked and expanded instance names.

  Args:
    lu: the LogicalUnit on whose behalf we expand (used for cfg access)
    instances: List of instance names (strings); an empty list selects
      all instances in the configuration

  """
  if not isinstance(instances, list):
    raise errors.OpPrereqError("Invalid argument type 'instances'")

  if not instances:
    # No names given: operate on every instance in the configuration
    return utils.NiceSort(lu.cfg.GetInstanceList())

  wanted = []
  for name in instances:
    expanded = lu.cfg.ExpandInstanceName(name)
    if expanded is None:
      raise errors.OpPrereqError("No such instance name '%s'" % name)
    wanted.append(expanded)

  return utils.NiceSort(wanted)
369 dcb93971 Michael Hanselmann
370 dcb93971 Michael Hanselmann
371 dcb93971 Michael Hanselmann
def _CheckOutputFields(static, dynamic, selected):
372 83120a01 Michael Hanselmann
  """Checks whether all selected fields are valid.
373 83120a01 Michael Hanselmann

374 83120a01 Michael Hanselmann
  Args:
375 83120a01 Michael Hanselmann
    static: Static fields
376 83120a01 Michael Hanselmann
    dynamic: Dynamic fields
377 83120a01 Michael Hanselmann

378 83120a01 Michael Hanselmann
  """
379 83120a01 Michael Hanselmann
  static_fields = frozenset(static)
380 83120a01 Michael Hanselmann
  dynamic_fields = frozenset(dynamic)
381 dcb93971 Michael Hanselmann
382 83120a01 Michael Hanselmann
  all_fields = static_fields | dynamic_fields
383 dcb93971 Michael Hanselmann
384 83120a01 Michael Hanselmann
  if not all_fields.issuperset(selected):
385 3ecf6786 Iustin Pop
    raise errors.OpPrereqError("Unknown output fields selected: %s"
386 3ecf6786 Iustin Pop
                               % ",".join(frozenset(selected).
387 3ecf6786 Iustin Pop
                                          difference(all_fields)))
388 dcb93971 Michael Hanselmann
389 dcb93971 Michael Hanselmann
390 ecb215b5 Michael Hanselmann
def _BuildInstanceHookEnv(name, primary_node, secondary_nodes, os_type, status,
391 396e1b78 Michael Hanselmann
                          memory, vcpus, nics):
392 ecb215b5 Michael Hanselmann
  """Builds instance related env variables for hooks from single variables.
393 ecb215b5 Michael Hanselmann

394 ecb215b5 Michael Hanselmann
  Args:
395 ecb215b5 Michael Hanselmann
    secondary_nodes: List of secondary nodes as strings
396 396e1b78 Michael Hanselmann
  """
397 396e1b78 Michael Hanselmann
  env = {
398 0e137c28 Iustin Pop
    "OP_TARGET": name,
399 396e1b78 Michael Hanselmann
    "INSTANCE_NAME": name,
400 396e1b78 Michael Hanselmann
    "INSTANCE_PRIMARY": primary_node,
401 396e1b78 Michael Hanselmann
    "INSTANCE_SECONDARIES": " ".join(secondary_nodes),
402 ecb215b5 Michael Hanselmann
    "INSTANCE_OS_TYPE": os_type,
403 396e1b78 Michael Hanselmann
    "INSTANCE_STATUS": status,
404 396e1b78 Michael Hanselmann
    "INSTANCE_MEMORY": memory,
405 396e1b78 Michael Hanselmann
    "INSTANCE_VCPUS": vcpus,
406 396e1b78 Michael Hanselmann
  }
407 396e1b78 Michael Hanselmann
408 396e1b78 Michael Hanselmann
  if nics:
409 396e1b78 Michael Hanselmann
    nic_count = len(nics)
410 53e4e875 Guido Trotter
    for idx, (ip, bridge, mac) in enumerate(nics):
411 396e1b78 Michael Hanselmann
      if ip is None:
412 396e1b78 Michael Hanselmann
        ip = ""
413 396e1b78 Michael Hanselmann
      env["INSTANCE_NIC%d_IP" % idx] = ip
414 396e1b78 Michael Hanselmann
      env["INSTANCE_NIC%d_BRIDGE" % idx] = bridge
415 53e4e875 Guido Trotter
      env["INSTANCE_NIC%d_HWADDR" % idx] = mac
416 396e1b78 Michael Hanselmann
  else:
417 396e1b78 Michael Hanselmann
    nic_count = 0
418 396e1b78 Michael Hanselmann
419 396e1b78 Michael Hanselmann
  env["INSTANCE_NIC_COUNT"] = nic_count
420 396e1b78 Michael Hanselmann
421 396e1b78 Michael Hanselmann
  return env
422 396e1b78 Michael Hanselmann
423 396e1b78 Michael Hanselmann
424 338e51e8 Iustin Pop
def _BuildInstanceHookEnvByObject(lu, instance, override=None):
  """Builds instance related env variables for hooks from an object.

  Args:
    lu: the LogicalUnit on whose behalf we build the env (used to reach
      the cluster config for BE parameter filling)
    instance: objects.Instance object of instance
    override: dict of values to override

  Returns the environment dict built by _BuildInstanceHookEnv.

  """
  bep = lu.cfg.GetClusterInfo().FillBE(instance)
  args = {
    'name': instance.name,
    'primary_node': instance.primary_node,
    'secondary_nodes': instance.secondary_nodes,
    'os_type': instance.os,
    # Bug fix: this was previously instance.os (copy-paste from the
    # 'os_type' line above), which exported the OS name in the
    # INSTANCE_STATUS hook variable instead of the instance's status
    'status': instance.status,
    'memory': bep[constants.BE_MEMORY],
    'vcpus': bep[constants.BE_VCPUS],
    'nics': [(nic.ip, nic.bridge, nic.mac) for nic in instance.nics],
  }
  if override:
    args.update(override)
  return _BuildInstanceHookEnv(**args)
445 396e1b78 Michael Hanselmann
446 396e1b78 Michael Hanselmann
447 b9bddb6b Iustin Pop
def _CheckInstanceBridgesExist(lu, instance):
448 bf6929a2 Alexander Schreiber
  """Check that the brigdes needed by an instance exist.
449 bf6929a2 Alexander Schreiber

450 bf6929a2 Alexander Schreiber
  """
451 bf6929a2 Alexander Schreiber
  # check bridges existance
452 bf6929a2 Alexander Schreiber
  brlist = [nic.bridge for nic in instance.nics]
453 72737a7f Iustin Pop
  if not lu.rpc.call_bridges_exist(instance.primary_node, brlist):
454 bf6929a2 Alexander Schreiber
    raise errors.OpPrereqError("one or more target bridges %s does not"
455 bf6929a2 Alexander Schreiber
                               " exist on destination node '%s'" %
456 bf6929a2 Alexander Schreiber
                               (brlist, instance.primary_node))
457 bf6929a2 Alexander Schreiber
458 bf6929a2 Alexander Schreiber
459 a8083063 Iustin Pop
class LUDestroyCluster(NoHooksLU):
  """Logical unit for destroying the cluster.

  """
  _OP_REQP = []

  def CheckPrereq(self):
    """Check prerequisites.

    The cluster may only be destroyed once it holds nothing but the
    master node itself: no other nodes and no instances.

    Any errors are signalled by raising errors.OpPrereqError.

    """
    master = self.cfg.GetMasterNode()

    node_names = self.cfg.GetNodeList()
    if node_names != [master]:
      raise errors.OpPrereqError("There are still %d node(s) in"
                                 " this cluster." % (len(node_names) - 1))

    instance_names = self.cfg.GetInstanceList()
    if instance_names:
      raise errors.OpPrereqError("There are still %d instance(s) in"
                                 " this cluster." % len(instance_names))

  def Exec(self, feedback_fn):
    """Destroys the cluster.

    Stops the master role on the master node, backs up the cluster ssh
    keys and returns the master's name to the caller.

    """
    master = self.cfg.GetMasterNode()
    if not self.rpc.call_node_stop_master(master, False):
      raise errors.OpExecError("Could not disable the master role")
    # Keep copies of the ssh keys so the machine can be re-added later
    priv_key, pub_key, _ = ssh.GetUserFiles(constants.GANETI_RUNAS)
    for key_file in (priv_key, pub_key):
      utils.CreateBackup(key_file)
    return master
495 a8083063 Iustin Pop
496 a8083063 Iustin Pop
497 d8fff41c Guido Trotter
class LUVerifyCluster(LogicalUnit):
  """Verifies the cluster status.

  Runs a series of consistency checks over all nodes and instances,
  reporting every problem through the feedback function; Exec returns
  True only when no problem was found.

  """
  HPATH = "cluster-verify"
  HTYPE = constants.HTYPE_CLUSTER
  # 'skip_checks' lists the optional verification steps to be skipped
  _OP_REQP = ["skip_checks"]
  REQ_BGL = False

  def ExpandNames(self):
    # verification only reads state, so all node and instance locks are
    # taken in shared mode
    self.needed_locks = {
      locking.LEVEL_NODE: locking.ALL_SET,
      locking.LEVEL_INSTANCE: locking.ALL_SET,
    }
    self.share_locks = dict(((i, 1) for i in locking.LEVELS))

  def _VerifyNode(self, node, file_list, local_cksum, vglist, node_result,
                  remote_version, feedback_fn):
    """Run multiple tests against a node.

    Test list:
      - compares ganeti version
      - checks vg existence and size > 20G
      - checks config file checksum
      - checks ssh to other nodes

    Args:
      node: name of the node to check
      file_list: required list of files
      local_cksum: dictionary of local files and their checksums
      vglist: the node's volume group data (falsy if it could not be
        retrieved)
      node_result: the node's answer to the node_verify RPC call
      remote_version: the node's reported protocol version (falsy if the
        node could not be contacted)
      feedback_fn: function used to report each problem found

    Returns:
      True if any problem was found on this node, False otherwise.

    """
    # compares ganeti version
    local_version = constants.PROTOCOL_VERSION
    if not remote_version:
      feedback_fn("  - ERROR: connection to %s failed" % (node))
      return True

    if local_version != remote_version:
      feedback_fn("  - ERROR: sw version mismatch: master %s, node(%s) %s" %
                      (local_version, node, remote_version))
      return True

    # checks vg existence and size > 20G

    bad = False
    if not vglist:
      feedback_fn("  - ERROR: unable to check volume groups on node %s." %
                      (node,))
      bad = True
    else:
      vgstatus = utils.CheckVolumeGroupSize(vglist, self.cfg.GetVGName(),
                                            constants.MIN_VG_SIZE)
      if vgstatus:
        feedback_fn("  - ERROR: %s on node %s" % (vgstatus, node))
        bad = True

    # without node_result none of the remaining checks can be done
    if not node_result:
      feedback_fn("  - ERROR: unable to verify node %s." % (node,))
      return True

    # checks config file checksum
    # checks ssh to any

    if 'filelist' not in node_result:
      bad = True
      feedback_fn("  - ERROR: node hasn't returned file checksum data")
    else:
      remote_cksum = node_result['filelist']
      for file_name in file_list:
        if file_name not in remote_cksum:
          bad = True
          feedback_fn("  - ERROR: file '%s' missing" % file_name)
        elif remote_cksum[file_name] != local_cksum[file_name]:
          bad = True
          feedback_fn("  - ERROR: file '%s' has wrong checksum" % file_name)

    if 'nodelist' not in node_result:
      bad = True
      feedback_fn("  - ERROR: node hasn't returned node ssh connectivity data")
    else:
      if node_result['nodelist']:
        bad = True
        # NOTE: 'node' is rebound here, shadowing the method argument
        for node in node_result['nodelist']:
          feedback_fn("  - ERROR: ssh communication with node '%s': %s" %
                          (node, node_result['nodelist'][node]))
    if 'node-net-test' not in node_result:
      bad = True
      feedback_fn("  - ERROR: node hasn't returned node tcp connectivity data")
    else:
      if node_result['node-net-test']:
        bad = True
        nlist = utils.NiceSort(node_result['node-net-test'].keys())
        # NOTE: 'node' is rebound here as well, shadowing the argument
        for node in nlist:
          feedback_fn("  - ERROR: tcp communication with node '%s': %s" %
                          (node, node_result['node-net-test'][node]))

    # hypervisor verification results: a dict mapping hypervisor name to
    # an error message (or None on success); note that hypervisor
    # failures are reported but do not set 'bad'
    hyp_result = node_result.get('hypervisor', None)
    if isinstance(hyp_result, dict):
      for hv_name, hv_result in hyp_result.iteritems():
        if hv_result is not None:
          feedback_fn("  - ERROR: hypervisor %s verify failure: '%s'" %
                      (hv_name, hv_result))
    return bad

  def _VerifyInstance(self, instance, instanceconfig, node_vol_is,
                      node_instance, feedback_fn):
    """Verify an instance.

    This function checks to see if the required block devices are
    available on the instance's node.

    Args:
      instance: the instance name
      instanceconfig: the instance's configuration object
      node_vol_is: dict of node -> volumes actually present on that node
      node_instance: dict of node -> instances actually running there
      feedback_fn: function used to report each problem found

    Returns:
      True if any problem was found for this instance, False otherwise.

    """
    bad = False

    node_current = instanceconfig.primary_node

    # volumes the instance's disks require, grouped by node
    node_vol_should = {}
    instanceconfig.MapLVsByNode(node_vol_should)

    for node in node_vol_should:
      for volume in node_vol_should[node]:
        if node not in node_vol_is or volume not in node_vol_is[node]:
          feedback_fn("  - ERROR: volume %s missing on node %s" %
                          (volume, node))
          bad = True

    # an instance not marked 'down' must actually run on its primary
    if not instanceconfig.status == 'down':
      if (node_current not in node_instance or
          not instance in node_instance[node_current]):
        feedback_fn("  - ERROR: instance %s not running on node %s" %
                        (instance, node_current))
        bad = True

    # ...and must not run anywhere else
    for node in node_instance:
      if (not node == node_current):
        if instance in node_instance[node]:
          feedback_fn("  - ERROR: instance %s should not run on node %s" %
                          (instance, node))
          bad = True

    return bad

  def _VerifyOrphanVolumes(self, node_vol_should, node_vol_is, feedback_fn):
    """Verify if there are any unknown volumes in the cluster.

    The .os, .swap and backup volumes are ignored. All other volumes are
    reported as unknown.

    Returns:
      True if any orphan volume was found, False otherwise.

    """
    bad = False

    for node in node_vol_is:
      for volume in node_vol_is[node]:
        if node not in node_vol_should or volume not in node_vol_should[node]:
          feedback_fn("  - ERROR: volume %s on node %s should not exist" %
                      (volume, node))
          bad = True
    return bad

  def _VerifyOrphanInstances(self, instancelist, node_instance, feedback_fn):
    """Verify the list of running instances.

    This checks what instances are running but unknown to the cluster.

    Returns:
      True if any unknown instance was found, False otherwise.

    """
    bad = False
    for node in node_instance:
      for runninginstance in node_instance[node]:
        if runninginstance not in instancelist:
          feedback_fn("  - ERROR: instance %s on node %s should not exist" %
                          (runninginstance, node))
          bad = True
    return bad

  def _VerifyNPlusOneMemory(self, node_info, instance_cfg, feedback_fn):
    """Verify N+1 Memory Resilience.

    Check that if one single node dies we can still start all the instances it
    was primary for.

    Args:
      node_info: dict of node -> info dict (with 'mfree' and
        'sinst-by-pnode' keys) as built in Exec
      instance_cfg: dict of instance name -> instance config object
      feedback_fn: function used to report each problem found

    Returns:
      True if any node fails the N+1 memory check, False otherwise.

    """
    bad = False

    for node, nodeinfo in node_info.iteritems():
      # This code checks that every node which is now listed as secondary has
      # enough memory to host all instances it is supposed to should a single
      # other node in the cluster fail.
      # FIXME: not ready for failover to an arbitrary node
      # FIXME: does not support file-backed instances
      # WARNING: we currently take into account down instances as well as up
      # ones, considering that even if they're down someone might want to start
      # them even in the event of a node failure.
      for prinode, instances in nodeinfo['sinst-by-pnode'].iteritems():
        needed_mem = 0
        for instance in instances:
          # only auto-balanced instances count towards the needed memory
          bep = self.cfg.GetClusterInfo().FillBE(instance_cfg[instance])
          if bep[constants.BE_AUTO_BALANCE]:
            needed_mem += bep[constants.BE_MEMORY]
        if nodeinfo['mfree'] < needed_mem:
          feedback_fn("  - ERROR: not enough memory on node %s to accomodate"
                      " failovers should node %s fail" % (node, prinode))
          bad = True
    return bad

  def CheckPrereq(self):
    """Check prerequisites.

    Transform the list of checks we're going to skip into a set and check that
    all its members are valid.

    """
    self.skip_set = frozenset(self.op.skip_checks)
    if not constants.VERIFY_OPTIONAL_CHECKS.issuperset(self.skip_set):
      raise errors.OpPrereqError("Invalid checks to be skipped specified")

  def BuildHooksEnv(self):
    """Build hooks env.

    Cluster-Verify hooks just run in the post phase and their failure makes
    the output be logged in the verify output and the verification to fail.

    """
    all_nodes = self.cfg.GetNodeList()
    # TODO: populate the environment with useful information for verify hooks
    env = {}
    return env, [], all_nodes

  def Exec(self, feedback_fn):
    """Verify integrity of cluster, performing various test on nodes.

    Returns:
      True if the whole verification succeeded, False if any problem
      was found.

    """
    bad = False
    feedback_fn("* Verifying global settings")
    for msg in self.cfg.VerifyConfig():
      feedback_fn("  - ERROR: %s" % msg)

    vg_name = self.cfg.GetVGName()
    hypervisors = self.cfg.GetClusterInfo().enabled_hypervisors
    nodelist = utils.NiceSort(self.cfg.GetNodeList())
    nodeinfo = [self.cfg.GetNodeInfo(nname) for nname in nodelist]
    instancelist = utils.NiceSort(self.cfg.GetInstanceList())
    i_non_redundant = [] # Non redundant instances
    i_non_a_balanced = [] # Non auto-balanced instances
    node_volume = {}    # node -> volumes actually present
    node_instance = {}  # node -> instances actually running
    node_info = {}      # node -> memory/disk/instance-placement info
    instance_cfg = {}   # instance name -> config object

    # FIXME: verify OS list
    # do local checksums
    file_names = []
    file_names.append(constants.SSL_CERT_FILE)
    file_names.append(constants.CLUSTER_CONF_FILE)
    local_checksums = utils.FingerprintFiles(file_names)

    # gather all per-node data in bulk RPC calls up front
    feedback_fn("* Gathering data (%d nodes)" % len(nodelist))
    all_volumeinfo = self.rpc.call_volume_list(nodelist, vg_name)
    all_instanceinfo = self.rpc.call_instance_list(nodelist, hypervisors)
    all_vglist = self.rpc.call_vg_list(nodelist)
    node_verify_param = {
      'filelist': file_names,
      'nodelist': nodelist,
      'hypervisor': hypervisors,
      'node-net-test': [(node.name, node.primary_ip, node.secondary_ip)
                        for node in nodeinfo]
      }
    all_nvinfo = self.rpc.call_node_verify(nodelist, node_verify_param,
                                           self.cfg.GetClusterName())
    all_rversion = self.rpc.call_version(nodelist)
    all_ninfo = self.rpc.call_node_info(nodelist, self.cfg.GetVGName(),
                                        self.cfg.GetHypervisorType())

    cluster = self.cfg.GetClusterInfo()
    for node in nodelist:
      feedback_fn("* Verifying node %s" % node)
      result = self._VerifyNode(node, file_names, local_checksums,
                                all_vglist[node], all_nvinfo[node],
                                all_rversion[node], feedback_fn)
      bad = bad or result

      # node_volume
      volumeinfo = all_volumeinfo[node]

      # a string result is an LVM error message rather than volume data
      if isinstance(volumeinfo, basestring):
        feedback_fn("  - ERROR: LVM problem on node %s: %s" %
                    (node, volumeinfo[-400:].encode('string_escape')))
        bad = True
        node_volume[node] = {}
      elif not isinstance(volumeinfo, dict):
        feedback_fn("  - ERROR: connection to %s failed" % (node,))
        bad = True
        continue
      else:
        node_volume[node] = volumeinfo

      # node_instance
      nodeinstance = all_instanceinfo[node]
      if type(nodeinstance) != list:
        feedback_fn("  - ERROR: connection to %s failed" % (node,))
        bad = True
        continue

      node_instance[node] = nodeinstance

      # node_info
      # NOTE: this rebinds 'nodeinfo', shadowing the list of node
      # objects built before the loop (only used for node-net-test)
      nodeinfo = all_ninfo[node]
      if not isinstance(nodeinfo, dict):
        feedback_fn("  - ERROR: connection to %s failed" % (node,))
        bad = True
        continue

      try:
        node_info[node] = {
          "mfree": int(nodeinfo['memory_free']),
          "dfree": int(nodeinfo['vg_free']),
          "pinst": [],
          "sinst": [],
          # dictionary holding all instances this node is secondary for,
          # grouped by their primary node. Each key is a cluster node, and each
          # value is a list of instances which have the key as primary and the
          # current node as secondary.  this is handy to calculate N+1 memory
          # availability if you can only failover from a primary to its
          # secondary.
          "sinst-by-pnode": {},
        }
      except ValueError:
        feedback_fn("  - ERROR: invalid value returned from node %s" % (node,))
        bad = True
        continue

    node_vol_should = {}

    for instance in instancelist:
      feedback_fn("* Verifying instance %s" % instance)
      inst_config = self.cfg.GetInstanceInfo(instance)
      result =  self._VerifyInstance(instance, inst_config, node_volume,
                                     node_instance, feedback_fn)
      bad = bad or result

      inst_config.MapLVsByNode(node_vol_should)

      instance_cfg[instance] = inst_config

      pnode = inst_config.primary_node
      if pnode in node_info:
        node_info[pnode]['pinst'].append(instance)
      else:
        feedback_fn("  - ERROR: instance %s, connection to primary node"
                    " %s failed" % (instance, pnode))
        bad = True

      # If the instance is non-redundant we cannot survive losing its primary
      # node, so we are not N+1 compliant. On the other hand we have no disk
      # templates with more than one secondary so that situation is not well
      # supported either.
      # FIXME: does not support file-backed instances
      if len(inst_config.secondary_nodes) == 0:
        i_non_redundant.append(instance)
      elif len(inst_config.secondary_nodes) > 1:
        feedback_fn("  - WARNING: multiple secondaries for instance %s"
                    % instance)

      if not cluster.FillBE(inst_config)[constants.BE_AUTO_BALANCE]:
        i_non_a_balanced.append(instance)

      for snode in inst_config.secondary_nodes:
        if snode in node_info:
          node_info[snode]['sinst'].append(instance)
          if pnode not in node_info[snode]['sinst-by-pnode']:
            node_info[snode]['sinst-by-pnode'][pnode] = []
          node_info[snode]['sinst-by-pnode'][pnode].append(instance)
        else:
          feedback_fn("  - ERROR: instance %s, connection to secondary node"
                      " %s failed" % (instance, snode))

    feedback_fn("* Verifying orphan volumes")
    result = self._VerifyOrphanVolumes(node_vol_should, node_volume,
                                       feedback_fn)
    bad = bad or result

    feedback_fn("* Verifying remaining instances")
    result = self._VerifyOrphanInstances(instancelist, node_instance,
                                         feedback_fn)
    bad = bad or result

    if constants.VERIFY_NPLUSONE_MEM not in self.skip_set:
      feedback_fn("* Verifying N+1 Memory redundancy")
      result = self._VerifyNPlusOneMemory(node_info, instance_cfg, feedback_fn)
      bad = bad or result

    feedback_fn("* Other Notes")
    if i_non_redundant:
      feedback_fn("  - NOTICE: %d non-redundant instance(s) found."
                  % len(i_non_redundant))

    if i_non_a_balanced:
      feedback_fn("  - NOTICE: %d non-auto-balanced instance(s) found."
                  % len(i_non_a_balanced))

    return not bad

  def HooksCallBack(self, phase, hooks_results, feedback_fn, lu_result):
    """Analyze the post-hooks' result, handle it, and send some
    nicely-formatted feedback back to the user.

    Args:
      phase: the hooks phase that has just been run
      hooks_results: the results of the multi-node hooks rpc call
      feedback_fn: function to send feedback back to the caller
      lu_result: previous Exec result

    Returns:
      The (possibly updated) lu_result for the POST phase; for other
      phases the return value is None (the result is left untouched).

    """
    # We only really run POST phase hooks, and are only interested in
    # their results
    if phase == constants.HOOKS_PHASE_POST:
      # Used to change hooks' output to proper indentation
      indent_re = re.compile('^', re.M)
      feedback_fn("* Hooks Results")
      if not hooks_results:
        feedback_fn("  - ERROR: general communication failure")
        lu_result = 1
      else:
        for node_name in hooks_results:
          show_node_header = True
          res = hooks_results[node_name]
          if res is False or not isinstance(res, list):
            feedback_fn("    Communication failure")
            lu_result = 1
            continue
          for script, hkr, output in res:
            if hkr == constants.HKR_FAIL:
              # The node header is only shown once, if there are
              # failing hooks on that node
              if show_node_header:
                feedback_fn("  Node %s:" % node_name)
                show_node_header = False
              feedback_fn("    ERROR: Script %s failed, output:" % script)
              output = indent_re.sub('      ', output)
              feedback_fn("%s" % output)
              lu_result = 1

      return lu_result
940 d8fff41c Guido Trotter
941 a8083063 Iustin Pop
942 2c95a8d4 Iustin Pop
class LUVerifyDisks(NoHooksLU):
  """Verifies the cluster disks status.

  Collects the logical volumes that running, network-mirrored instances
  should have on each node and compares them with what the nodes
  actually report, flagging unreachable nodes, LVM errors, instances
  with offline volumes and missing volumes.

  """
  _OP_REQP = []
  REQ_BGL = False

  def ExpandNames(self):
    # verification only reads state, so all node and instance locks are
    # taken in shared mode
    self.needed_locks = {
      locking.LEVEL_NODE: locking.ALL_SET,
      locking.LEVEL_INSTANCE: locking.ALL_SET,
    }
    self.share_locks = dict(((i, 1) for i in locking.LEVELS))

  def CheckPrereq(self):
    """Check prerequisites.

    This has no prerequisites.

    """
    pass

  def Exec(self, feedback_fn):
    """Verify integrity of cluster disks.

    Returns:
      A tuple of (unreachable nodes, {node: lvm error message},
      instances with offline volumes, {instance: [(node, volume), ...]
      missing volumes}).

    """
    # the four result containers are also aliased as 'result' so the
    # early return and the final return share the same tuple shape
    result = res_nodes, res_nlvm, res_instances, res_missing = [], {}, [], {}

    vg_name = self.cfg.GetVGName()
    nodes = utils.NiceSort(self.cfg.GetNodeList())
    instances = [self.cfg.GetInstanceInfo(name)
                 for name in self.cfg.GetInstanceList()]

    nv_dict = {}
    for inst in instances:
      inst_lvs = {}
      # only running, network-mirrored instances are checked
      if (inst.status != "up" or
          inst.disk_template not in constants.DTS_NET_MIRROR):
        continue
      inst.MapLVsByNode(inst_lvs)
      # transform { iname: {node: [vol,],},} to {(node, vol): iname}
      for node, vol_list in inst_lvs.iteritems():
        for vol in vol_list:
          nv_dict[(node, vol)] = inst

    if not nv_dict:
      return result

    node_lvs = self.rpc.call_volume_list(nodes, vg_name)

    for node in nodes:
      # node_volume
      lvs = node_lvs[node]

      if isinstance(lvs, basestring):
        # a string result is an LVM error message, not volume data;
        # without this 'continue' the loop below would crash with
        # AttributeError on lvs.iteritems()
        logger.Info("error enumerating LVs on node %s: %s" % (node, lvs))
        res_nlvm[node] = lvs
        continue
      elif not isinstance(lvs, dict):
        logger.Info("connection to node %s failed or invalid data returned" %
                    (node,))
        res_nodes.append(node)
        continue

      for lv_name, (_, _, lv_online) in lvs.iteritems():
        # every volume found on a node is popped from nv_dict, so that
        # whatever remains afterwards is known to be missing
        inst = nv_dict.pop((node, lv_name), None)
        if (not lv_online and inst is not None
            and inst.name not in res_instances):
          res_instances.append(inst.name)

    # any leftover items in nv_dict are missing LVs, let's arrange the
    # data better
    for key, inst in nv_dict.iteritems():
      if inst.name not in res_missing:
        res_missing[inst.name] = []
      res_missing[inst.name].append(key)

    return result
1020 2c95a8d4 Iustin Pop
1021 2c95a8d4 Iustin Pop
1022 07bd8a51 Iustin Pop
class LURenameCluster(LogicalUnit):
  """Rename the cluster.

  Renaming involves stopping the master IP, updating the cluster
  name/IP data, distributing the updated files to all nodes, and
  restarting the master IP.

  """
  HPATH = "cluster-rename"
  HTYPE = constants.HTYPE_CLUSTER
  # the opcode must carry the new cluster name
  _OP_REQP = ["name"]

  def BuildHooksEnv(self):
    """Build hooks env.

    Hooks run only on the master node, with the old cluster name as
    the target and the new name exported.

    """
    env = {
      "OP_TARGET": self.cfg.GetClusterName(),
      "NEW_NAME": self.op.name,
      }
    mn = self.cfg.GetMasterNode()
    return env, [mn], [mn]

  def CheckPrereq(self):
    """Verify that the passed name is a valid one.

    Resolves the new name and refuses to proceed if neither the name
    nor the IP would change, or if the new IP is already live on the
    network.

    """
    hostname = utils.HostInfo(self.op.name)

    new_name = hostname.name
    # remember the resolved IP for Exec
    self.ip = new_ip = hostname.ip
    old_name = self.cfg.GetClusterName()
    old_ip = self.cfg.GetMasterIP()
    if new_name == old_name and new_ip == old_ip:
      raise errors.OpPrereqError("Neither the name nor the IP address of the"
                                 " cluster has changed")
    if new_ip != old_ip:
      # a reachable IP means some other host already uses this address
      if utils.TcpPing(new_ip, constants.DEFAULT_NODED_PORT):
        raise errors.OpPrereqError("The given cluster IP address (%s) is"
                                   " reachable on the network. Aborting." %
                                   new_ip)

    # store the canonical (resolved) name back into the opcode
    self.op.name = new_name

  def Exec(self, feedback_fn):
    """Rename the cluster.

    """
    clustername = self.op.name
    ip = self.ip

    # shutdown the master IP
    master = self.cfg.GetMasterNode()
    if not self.rpc.call_node_stop_master(master, False):
      raise errors.OpExecError("Could not disable the master role")

    try:
      # modify the sstore
      # TODO: sstore
      # NOTE(review): 'ss' does not appear to be bound anywhere in this
      # LU; the TODO above suggests this block still awaits porting to
      # the new ssconf interface — confirm before relying on rename
      ss.SetKey(ss.SS_MASTER_IP, ip)
      ss.SetKey(ss.SS_CLUSTER_NAME, clustername)

      # Distribute updated ss config to all nodes
      myself = self.cfg.GetNodeInfo(master)
      dist_nodes = self.cfg.GetNodeList()
      if myself.name in dist_nodes:
        dist_nodes.remove(myself.name)

      logger.Debug("Copying updated ssconf data to all nodes")
      for keyname in [ss.SS_CLUSTER_NAME, ss.SS_MASTER_IP]:
        fname = ss.KeyToFilename(keyname)
        result = self.rpc.call_upload_file(dist_nodes, fname)
        for to_node in dist_nodes:
          if not result[to_node]:
            # distribution failures are logged but not fatal
            logger.Error("copy of file %s to node %s failed" %
                         (fname, to_node))
    finally:
      # always try to restart the master IP, even on failure above
      if not self.rpc.call_node_start_master(master, False):
        logger.Error("Could not re-enable the master role on the master,"
                     " please restart manually.")
1098 07bd8a51 Iustin Pop
1099 07bd8a51 Iustin Pop
1100 8084f9f6 Manuel Franceschini
def _RecursiveCheckIfLVMBased(disk):
  """Check if the given disk or its children are lvm-based.

  Args:
    disk: ganeti.objects.Disk object

  Returns:
    boolean indicating whether a LD_LV dev_type was found or not

  """
  # a single lvm-based descendant is enough to flag the whole disk
  for chdisk in (disk.children or []):
    if _RecursiveCheckIfLVMBased(chdisk):
      return True
  return disk.dev_type == constants.LD_LV
1115 8084f9f6 Manuel Franceschini
1116 8084f9f6 Manuel Franceschini
1117 8084f9f6 Manuel Franceschini
class LUSetClusterParams(LogicalUnit):
  """Change the parameters of the cluster.

  Handles changing the volume group name, the enabled hypervisor list,
  and the cluster-level hypervisor and backend parameter defaults.

  """
  HPATH = "cluster-modify"
  HTYPE = constants.HTYPE_CLUSTER
  # all opcode parameters are optional
  _OP_REQP = []
  REQ_BGL = False

  def ExpandNames(self):
    # FIXME: in the future maybe other cluster params won't require checking on
    # all nodes to be modified.
    self.needed_locks = {
      locking.LEVEL_NODE: locking.ALL_SET,
    }
    # node locks are acquired in shared mode
    self.share_locks[locking.LEVEL_NODE] = 1

  def BuildHooksEnv(self):
    """Build hooks env.

    Hooks run only on the master node.

    """
    env = {
      "OP_TARGET": self.cfg.GetClusterName(),
      "NEW_VG_NAME": self.op.vg_name,
      }
    mn = self.cfg.GetMasterNode()
    return env, [mn], [mn]

  def CheckPrereq(self):
    """Check prerequisites.

    This checks whether the given params don't conflict and
    if the given volume group is valid.

    """
    # FIXME: This only works because there is only one parameter that can be
    # changed or removed.
    # a false-but-not-None vg_name means "disable lvm storage"; this is
    # only allowed when no instance uses lvm-based disks
    if self.op.vg_name is not None and not self.op.vg_name:
      instances = self.cfg.GetAllInstancesInfo().values()
      for inst in instances:
        for disk in inst.disks:
          if _RecursiveCheckIfLVMBased(disk):
            raise errors.OpPrereqError("Cannot disable lvm storage while"
                                       " lvm-based instances exist")

    node_list = self.acquired_locks[locking.LEVEL_NODE]

    # if vg_name not None, checks given volume group on all nodes
    if self.op.vg_name:
      vglist = self.rpc.call_vg_list(node_list)
      for node in node_list:
        vgstatus = utils.CheckVolumeGroupSize(vglist[node], self.op.vg_name,
                                              constants.MIN_VG_SIZE)
        if vgstatus:
          # a non-empty vgstatus carries the error description
          raise errors.OpPrereqError("Error on node '%s': %s" %
                                     (node, vgstatus))

    self.cluster = cluster = self.cfg.GetClusterInfo()
    # beparams changes do not need validation (we can't validate?),
    # but we still process here
    if self.op.beparams:
      self.new_beparams = cluster.FillDict(
        cluster.beparams[constants.BEGR_DEFAULT], self.op.beparams)

    # hypervisor list/parameters
    self.new_hvparams = cluster.FillDict(cluster.hvparams, {})
    if self.op.hvparams:
      if not isinstance(self.op.hvparams, dict):
        raise errors.OpPrereqError("Invalid 'hvparams' parameter on input")
      # merge the per-hypervisor overrides into the current defaults
      for hv_name, hv_dict in self.op.hvparams.items():
        if hv_name not in self.new_hvparams:
          self.new_hvparams[hv_name] = hv_dict
        else:
          self.new_hvparams[hv_name].update(hv_dict)

    if self.op.enabled_hypervisors is not None:
      self.hv_list = self.op.enabled_hypervisors
    else:
      self.hv_list = cluster.enabled_hypervisors

    if self.op.hvparams or self.op.enabled_hypervisors is not None:
      # either the enabled list has changed, or the parameters have, validate
      for hv_name, hv_params in self.new_hvparams.items():
        if ((self.op.hvparams and hv_name in self.op.hvparams) or
            (self.op.enabled_hypervisors and
             hv_name in self.op.enabled_hypervisors)):
          # either this is a new hypervisor, or its parameters have changed
          hv_class = hypervisor.GetHypervisor(hv_name)
          hv_class.CheckParameterSyntax(hv_params)
          _CheckHVParams(self, node_list, hv_name, hv_params)

  def Exec(self, feedback_fn):
    """Change the parameters of the cluster.

    """
    if self.op.vg_name is not None:
      if self.op.vg_name != self.cfg.GetVGName():
        self.cfg.SetVGName(self.op.vg_name)
      else:
        feedback_fn("Cluster LVM configuration already in desired"
                    " state, not changing")
    if self.op.hvparams:
      self.cluster.hvparams = self.new_hvparams
    if self.op.enabled_hypervisors is not None:
      self.cluster.enabled_hypervisors = self.op.enabled_hypervisors
    if self.op.beparams:
      self.cluster.beparams[constants.BEGR_DEFAULT] = self.new_beparams
    # persist all accumulated changes in one config update
    self.cfg.Update(self.cluster)
1225 8084f9f6 Manuel Franceschini
1226 8084f9f6 Manuel Franceschini
1227 b9bddb6b Iustin Pop
def _WaitForSync(lu, instance, oneshot=False, unlock=False):
  """Sleep and poll for an instance's disk to sync.

  Polls the primary node for the mirror status of all the instance's
  disks until none reports a sync in progress (or, if oneshot is True,
  polls only once).

  Args:
    lu: the calling LogicalUnit (provides cfg, rpc and proc logging)
    instance: the instance whose disks are checked
    oneshot: if True, query the status once instead of looping
    unlock: NOTE(review): never read in this function — confirm whether
      callers still rely on passing it before removing

  Returns:
    True if no disk ended up degraded, False otherwise

  """
  if not instance.disks:
    # nothing to wait for
    return True

  if not oneshot:
    lu.proc.LogInfo("Waiting for instance %s to sync disks." % instance.name)

  node = instance.primary_node

  for dev in instance.disks:
    lu.cfg.SetDiskID(dev, node)

  retries = 0
  while True:
    max_time = 0
    done = True
    cumul_degraded = False
    rstats = lu.rpc.call_blockdev_getmirrorstatus(node, instance.disks)
    if not rstats:
      # no answer from the node: retry a bounded number of times
      lu.proc.LogWarning("Can't get any data from node %s" % node)
      retries += 1
      if retries >= 10:
        raise errors.RemoteError("Can't contact node %s for mirror data,"
                                 " aborting." % node)
      time.sleep(6)
      continue
    # a successful answer resets the retry counter
    retries = 0
    for i in range(len(rstats)):
      mstat = rstats[i]
      if mstat is None:
        lu.proc.LogWarning("Can't compute data for node %s/%s" %
                           (node, instance.disks[i].iv_name))
        continue
      # we ignore the ldisk parameter
      perc_done, est_time, is_degraded, _ = mstat
      # degraded with no sync in progress counts as a final failure
      cumul_degraded = cumul_degraded or (is_degraded and perc_done is None)
      if perc_done is not None:
        # a percentage means this device is still syncing
        done = False
        if est_time is not None:
          rem_time = "%d estimated seconds remaining" % est_time
          max_time = est_time
        else:
          rem_time = "no time estimate"
        lu.proc.LogInfo("- device %s: %5.2f%% done, %s" %
                        (instance.disks[i].iv_name, perc_done, rem_time))
    if done or oneshot:
      break

    # sleep roughly as long as the slowest device needs, capped at 60s
    time.sleep(min(60, max_time))

  if done:
    lu.proc.LogInfo("Instance %s's disks are in sync." % instance.name)
  return not cumul_degraded
1283 a8083063 Iustin Pop
1284 a8083063 Iustin Pop
1285 b9bddb6b Iustin Pop
def _CheckDiskConsistency(lu, dev, node, on_primary, ldisk=False):
1286 a8083063 Iustin Pop
  """Check that mirrors are not degraded.
1287 a8083063 Iustin Pop

1288 0834c866 Iustin Pop
  The ldisk parameter, if True, will change the test from the
1289 0834c866 Iustin Pop
  is_degraded attribute (which represents overall non-ok status for
1290 0834c866 Iustin Pop
  the device(s)) to the ldisk (representing the local storage status).
1291 0834c866 Iustin Pop

1292 a8083063 Iustin Pop
  """
1293 b9bddb6b Iustin Pop
  lu.cfg.SetDiskID(dev, node)
1294 0834c866 Iustin Pop
  if ldisk:
1295 0834c866 Iustin Pop
    idx = 6
1296 0834c866 Iustin Pop
  else:
1297 0834c866 Iustin Pop
    idx = 5
1298 a8083063 Iustin Pop
1299 a8083063 Iustin Pop
  result = True
1300 a8083063 Iustin Pop
  if on_primary or dev.AssembleOnSecondary():
1301 72737a7f Iustin Pop
    rstats = lu.rpc.call_blockdev_find(node, dev)
1302 a8083063 Iustin Pop
    if not rstats:
1303 aa9d0c32 Guido Trotter
      logger.ToStderr("Node %s: Disk degraded, not found or node down" % node)
1304 a8083063 Iustin Pop
      result = False
1305 a8083063 Iustin Pop
    else:
1306 0834c866 Iustin Pop
      result = result and (not rstats[idx])
1307 a8083063 Iustin Pop
  if dev.children:
1308 a8083063 Iustin Pop
    for child in dev.children:
1309 b9bddb6b Iustin Pop
      result = result and _CheckDiskConsistency(lu, child, node, on_primary)
1310 a8083063 Iustin Pop
1311 a8083063 Iustin Pop
  return result
1312 a8083063 Iustin Pop
1313 a8083063 Iustin Pop
1314 a8083063 Iustin Pop
class LUDiagnoseOS(NoHooksLU):
  """Logical unit for OS diagnose/query.

  """
  _OP_REQP = ["output_fields", "names"]
  REQ_BGL = False

  def ExpandNames(self):
    if self.op.names:
      raise errors.OpPrereqError("Selective OS query not supported")

    self.dynamic_fields = frozenset(["name", "valid", "node_status"])
    _CheckOutputFields(static=[], dynamic=self.dynamic_fields,
                       selected=self.op.output_fields)

    # all nodes are needed, but only with shared locks
    self.needed_locks = {locking.LEVEL_NODE: locking.ALL_SET}
    self.share_locks[locking.LEVEL_NODE] = 1

  def CheckPrereq(self):
    """Check prerequisites.

    """

  @staticmethod
  def _DiagnoseByOS(node_list, rlist):
    """Remap a per-node return list into a per-os per-node dictionary.

      Args:
        node_list: a list with the names of all nodes
        rlist: a map with node names as keys and OS objects as values

      Returns:
        map: a map with osnames as keys and as value another map, with
             nodes as keys and list of OS objects as values
             e.g. {"debian-etch": {"node1": [<object>,...],
                                   "node2": [<object>,]}
                  }

    """
    by_os = {}
    for node_name, os_list in rlist.iteritems():
      if not os_list:
        continue
      for os_obj in os_list:
        if os_obj.name in by_os:
          per_node = by_os[os_obj.name]
        else:
          # first occurrence of this OS: seed an empty list for every
          # known node so missing nodes are explicit in the result
          per_node = by_os[os_obj.name] = dict([(nname, [])
                                                for nname in node_list])
        per_node[node_name].append(os_obj)
    return by_os

  def Exec(self, feedback_fn):
    """Compute the list of OSes.

    """
    node_list = self.acquired_locks[locking.LEVEL_NODE]
    node_data = self.rpc.call_os_diagnose(node_list)
    if node_data == False:
      raise errors.OpExecError("Can't gather the list of OSes")
    pol = self._DiagnoseByOS(node_list, node_data)
    output = []
    for os_name, os_data in pol.iteritems():
      row = []
      for field in self.op.output_fields:
        if field == "name":
          row.append(os_name)
        elif field == "valid":
          # every node must have at least one entry and a true first entry
          row.append(utils.all([osl and osl[0] for osl in os_data.values()]))
        elif field == "node_status":
          status = {}
          for node_name, nos_list in os_data.iteritems():
            status[node_name] = [(v.status, v.path) for v in nos_list]
          row.append(status)
        else:
          raise errors.ParameterError(field)
      output.append(row)

    return output
1398 a8083063 Iustin Pop
1399 a8083063 Iustin Pop
1400 a8083063 Iustin Pop
class LURemoveNode(LogicalUnit):
  """Logical unit for removing a node.

  """
  HPATH = "node-remove"
  HTYPE = constants.HTYPE_NODE
  _OP_REQP = ["node_name"]

  def BuildHooksEnv(self):
    """Build hooks env.

    This doesn't run on the target node in the pre phase as a failed
    node would then be impossible to remove.

    """
    env = {
      "OP_TARGET": self.op.node_name,
      "NODE_NAME": self.op.node_name,
      }
    all_nodes = self.cfg.GetNodeList()
    # the node being removed does not run its own removal hooks
    all_nodes.remove(self.op.node_name)
    return env, all_nodes, all_nodes

  def CheckPrereq(self):
    """Check prerequisites.

    This checks:
     - the node exists in the configuration
     - it does not have primary or secondary instances
     - it's not the master

    Any errors are signalled by raising errors.OpPrereqError.

    """
    node = self.cfg.GetNodeInfo(self.cfg.ExpandNodeName(self.op.node_name))
    if node is None:
      # fixed: use call syntax for the raise, consistent with the rest
      # of the module (the "raise Class, args" statement form is
      # deprecated and invalid in Python 3)
      raise errors.OpPrereqError("Node '%s' is unknown." % self.op.node_name)

    instance_list = self.cfg.GetInstanceList()

    masternode = self.cfg.GetMasterNode()
    if node.name == masternode:
      raise errors.OpPrereqError("Node is the master node,"
                                 " you need to failover first.")

    for instance_name in instance_list:
      instance = self.cfg.GetInstanceInfo(instance_name)
      if node.name == instance.primary_node:
        raise errors.OpPrereqError("Instance %s still running on the node,"
                                   " please remove first." % instance_name)
      if node.name in instance.secondary_nodes:
        raise errors.OpPrereqError("Instance %s has node as a secondary,"
                                   " please remove first." % instance_name)
    # remember the canonical name and the node object for Exec
    self.op.node_name = node.name
    self.node = node

  def Exec(self, feedback_fn):
    """Removes the node from the cluster.

    """
    node = self.node
    logger.Info("stopping the node daemon and removing configs from node %s" %
                node.name)

    # remove the node via the cluster context object
    self.context.RemoveNode(node.name)

    # tell the node itself to leave the cluster
    self.rpc.call_node_leave_cluster(node.name)
1467 c8a0948f Michael Hanselmann
1468 a8083063 Iustin Pop
1469 a8083063 Iustin Pop
class LUQueryNodes(NoHooksLU):
  """Logical unit for querying nodes.

  """
  _OP_REQP = ["output_fields", "names"]
  REQ_BGL = False

  def ExpandNames(self):
    # fields that require a live query to the nodes
    self.dynamic_fields = frozenset([
      "dtotal", "dfree",
      "mtotal", "mnode", "mfree",
      "bootid",
      "ctotal",
      ])

    # fields answerable from the configuration alone
    self.static_fields = frozenset([
      "name", "pinst_cnt", "sinst_cnt",
      "pinst_list", "sinst_list",
      "pip", "sip", "tags",
      "serial_no",
      ])

    _CheckOutputFields(static=self.static_fields,
                       dynamic=self.dynamic_fields,
                       selected=self.op.output_fields)

    self.needed_locks = {}
    self.share_locks[locking.LEVEL_NODE] = 1

    if self.op.names:
      self.wanted = _GetWantedNodes(self, self.op.names)
    else:
      self.wanted = locking.ALL_SET

    self.do_locking = not self.static_fields.issuperset(self.op.output_fields)
    if self.do_locking:
      # if we don't request only static fields, we need to lock the nodes
      self.needed_locks[locking.LEVEL_NODE] = self.wanted


  def CheckPrereq(self):
    """Check prerequisites.

    """
    # The validation of the node list is done in the _GetWantedNodes,
    # if non empty, and if empty, there's no validation to do
    pass

  def Exec(self, feedback_fn):
    """Computes the list of nodes and their attributes.

    """
    all_info = self.cfg.GetAllNodesInfo()
    if self.do_locking:
      # we hold node locks, so the set of locked nodes is authoritative
      nodenames = self.acquired_locks[locking.LEVEL_NODE]
    elif self.wanted != locking.ALL_SET:
      # lockless query for specific nodes: they may have been removed
      # between ExpandNames and now
      nodenames = self.wanted
      missing = set(nodenames).difference(all_info.keys())
      if missing:
        raise errors.OpExecError(
          "Some nodes were removed before retrieving their data: %s" % missing)
    else:
      nodenames = all_info.keys()

    nodenames = utils.NiceSort(nodenames)
    nodelist = [all_info[name] for name in nodenames]

    # begin data gathering

    # query the nodes only if at least one requested field is dynamic
    if self.dynamic_fields.intersection(self.op.output_fields):
      live_data = {}
      node_data = self.rpc.call_node_info(nodenames, self.cfg.GetVGName(),
                                          self.cfg.GetHypervisorType())
      for name in nodenames:
        nodeinfo = node_data.get(name, None)
        if nodeinfo:
          live_data[name] = {
            "mtotal": utils.TryConvert(int, nodeinfo['memory_total']),
            "mnode": utils.TryConvert(int, nodeinfo['memory_dom0']),
            "mfree": utils.TryConvert(int, nodeinfo['memory_free']),
            "dtotal": utils.TryConvert(int, nodeinfo['vg_size']),
            "dfree": utils.TryConvert(int, nodeinfo['vg_free']),
            "ctotal": utils.TryConvert(int, nodeinfo['cpu_total']),
            "bootid": nodeinfo['bootid'],
            }
        else:
          # no answer from this node: leave its live data empty
          live_data[name] = {}
    else:
      live_data = dict.fromkeys(nodenames, {})

    # per-node sets of primary/secondary instance names
    node_to_primary = dict([(name, set()) for name in nodenames])
    node_to_secondary = dict([(name, set()) for name in nodenames])

    inst_fields = frozenset(("pinst_cnt", "pinst_list",
                             "sinst_cnt", "sinst_list"))
    # scan the instance list only if instance fields were requested
    if inst_fields & frozenset(self.op.output_fields):
      instancelist = self.cfg.GetInstanceList()

      for instance_name in instancelist:
        inst = self.cfg.GetInstanceInfo(instance_name)
        if inst.primary_node in node_to_primary:
          node_to_primary[inst.primary_node].add(inst.name)
        for secnode in inst.secondary_nodes:
          if secnode in node_to_secondary:
            node_to_secondary[secnode].add(inst.name)

    # end data gathering

    # build one output row per node, in the requested field order
    output = []
    for node in nodelist:
      node_output = []
      for field in self.op.output_fields:
        if field == "name":
          val = node.name
        elif field == "pinst_list":
          val = list(node_to_primary[node.name])
        elif field == "sinst_list":
          val = list(node_to_secondary[node.name])
        elif field == "pinst_cnt":
          val = len(node_to_primary[node.name])
        elif field == "sinst_cnt":
          val = len(node_to_secondary[node.name])
        elif field == "pip":
          val = node.primary_ip
        elif field == "sip":
          val = node.secondary_ip
        elif field == "tags":
          val = list(node.GetTags())
        elif field == "serial_no":
          val = node.serial_no
        elif field in self.dynamic_fields:
          # dynamic fields may be missing if the node didn't answer
          val = live_data[node.name].get(field, None)
        else:
          raise errors.ParameterError(field)
        node_output.append(val)
      output.append(node_output)

    return output
1607 a8083063 Iustin Pop
1608 a8083063 Iustin Pop
1609 dcb93971 Michael Hanselmann
class LUQueryNodeVolumes(NoHooksLU):
  """Logical unit for getting volumes on node(s).

  """
  _OP_REQP = ["nodes", "output_fields"]
  REQ_BGL = False

  def ExpandNames(self):
    """Validate the output fields and compute the node locks needed.

    An empty op.nodes list means "all nodes", expressed as
    locking.ALL_SET; otherwise only the named nodes are locked.

    """
    _CheckOutputFields(static=["node"],
                       dynamic=["phys", "vg", "name", "size", "instance"],
                       selected=self.op.output_fields)

    self.needed_locks = {}
    # read-only query, so node locks can be shared
    self.share_locks[locking.LEVEL_NODE] = 1
    if not self.op.nodes:
      self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
    else:
      self.needed_locks[locking.LEVEL_NODE] = \
        _GetWantedNodes(self, self.op.nodes)

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the fields required are valid output fields.

    """
    # the node names are exactly the node locks acquired in ExpandNames
    self.nodes = self.acquired_locks[locking.LEVEL_NODE]

  def Exec(self, feedback_fn):
    """Computes the list of nodes and their attributes.

    Returns a list of rows, one per volume, each row holding the
    selected output fields (as strings) in the requested order.

    """
    nodenames = self.nodes
    volumes = self.rpc.call_node_volumes(nodenames)

    # all instances, needed to map each LV back to its owning instance
    ilist = [self.cfg.GetInstanceInfo(iname) for iname
             in self.cfg.GetInstanceList()]

    # instance -> {node: [lv names]} mapping
    lv_by_node = dict([(inst, inst.MapLVsByNode()) for inst in ilist])

    output = []
    for node in nodenames:
      # skip nodes that failed the RPC or have no volumes
      if node not in volumes or not volumes[node]:
        continue

      # sort a copy by physical device so output order is stable
      node_vols = volumes[node][:]
      node_vols.sort(key=lambda vol: vol['dev'])

      for vol in node_vols:
        node_output = []
        for field in self.op.output_fields:
          if field == "node":
            val = node
          elif field == "phys":
            val = vol['dev']
          elif field == "vg":
            val = vol['vg']
          elif field == "name":
            val = vol['name']
          elif field == "size":
            val = int(float(vol['size']))
          elif field == "instance":
            # find the instance owning this LV; the for/else yields '-'
            # when no instance claims it
            for inst in ilist:
              if node not in lv_by_node[inst]:
                continue
              if vol['name'] in lv_by_node[inst][node]:
                val = inst.name
                break
            else:
              val = '-'
          else:
            raise errors.ParameterError(field)
          node_output.append(str(val))

        output.append(node_output)

    return output
1686 dcb93971 Michael Hanselmann
1687 dcb93971 Michael Hanselmann
1688 a8083063 Iustin Pop
class LUAddNode(LogicalUnit):
  """Logical unit for adding node to the cluster.

  """
  HPATH = "node-add"
  HTYPE = constants.HTYPE_NODE
  _OP_REQP = ["node_name"]

  def BuildHooksEnv(self):
    """Build hooks env.

    This will run on all nodes before, and on all nodes + the new node after.

    """
    env = {
      "OP_TARGET": self.op.node_name,
      "NODE_NAME": self.op.node_name,
      "NODE_PIP": self.op.primary_ip,
      "NODE_SIP": self.op.secondary_ip,
      }
    # pre-hooks run on the current nodes, post-hooks also on the new one
    nodes_0 = self.cfg.GetNodeList()
    nodes_1 = nodes_0 + [self.op.node_name, ]
    return env, nodes_0, nodes_1

  def CheckPrereq(self):
    """Check prerequisites.

    This checks:
     - the new node is not already in the config
     - it is resolvable
     - its parameters (single/dual homed) matches the cluster

    Any errors are signalled by raising errors.OpPrereqError.

    """
    node_name = self.op.node_name
    cfg = self.cfg

    # resolve the name; this raises if the node is not resolvable
    dns_data = utils.HostInfo(node_name)

    node = dns_data.name
    primary_ip = self.op.primary_ip = dns_data.ip
    secondary_ip = getattr(self.op, "secondary_ip", None)
    if secondary_ip is None:
      # single-homed node: secondary defaults to the primary ip
      secondary_ip = primary_ip
    if not utils.IsValidIP(secondary_ip):
      raise errors.OpPrereqError("Invalid secondary IP given")
    self.op.secondary_ip = secondary_ip

    node_list = cfg.GetNodeList()
    if not self.op.readd and node in node_list:
      raise errors.OpPrereqError("Node %s is already in the configuration" %
                                 node)
    elif self.op.readd and node not in node_list:
      raise errors.OpPrereqError("Node %s is not in the configuration" % node)

    for existing_node_name in node_list:
      existing_node = cfg.GetNodeInfo(existing_node_name)

      if self.op.readd and node == existing_node_name:
        # on readd, the node's own entry must keep the same addresses
        if (existing_node.primary_ip != primary_ip or
            existing_node.secondary_ip != secondary_ip):
          raise errors.OpPrereqError("Readded node doesn't have the same IP"
                                     " address configuration as before")
        continue

      # neither of the new node's addresses may clash with any other node
      if (existing_node.primary_ip == primary_ip or
          existing_node.secondary_ip == primary_ip or
          existing_node.primary_ip == secondary_ip or
          existing_node.secondary_ip == secondary_ip):
        raise errors.OpPrereqError("New node ip address(es) conflict with"
                                   " existing node %s" % existing_node.name)

    # check that the type of the node (single versus dual homed) is the
    # same as for the master
    myself = cfg.GetNodeInfo(self.cfg.GetMasterNode())
    master_singlehomed = myself.secondary_ip == myself.primary_ip
    newbie_singlehomed = secondary_ip == primary_ip
    if master_singlehomed != newbie_singlehomed:
      if master_singlehomed:
        raise errors.OpPrereqError("The master has no private ip but the"
                                   " new node has one")
      else:
        raise errors.OpPrereqError("The master has a private ip but the"
                                   " new node doesn't have one")

    # checks reachablity
    if not utils.TcpPing(primary_ip, constants.DEFAULT_NODED_PORT):
      raise errors.OpPrereqError("Node not reachable by ping")

    if not newbie_singlehomed:
      # check reachability from my secondary ip to newbie's secondary ip
      if not utils.TcpPing(secondary_ip, constants.DEFAULT_NODED_PORT,
                           source=myself.secondary_ip):
        raise errors.OpPrereqError("Node secondary ip not reachable by TCP"
                                   " based ping to noded port")

    self.new_node = objects.Node(name=node,
                                 primary_ip=primary_ip,
                                 secondary_ip=secondary_ip)

  def Exec(self, feedback_fn):
    """Adds the new node to the cluster.

    """
    new_node = self.new_node
    node = new_node.name

    # check connectivity
    result = self.rpc.call_version([node])[node]
    if result:
      if constants.PROTOCOL_VERSION == result:
        logger.Info("communication to node %s fine, sw version %s match" %
                    (node, result))
      else:
        raise errors.OpExecError("Version mismatch master version %s,"
                                 " node version %s" %
                                 (constants.PROTOCOL_VERSION, result))
    else:
      raise errors.OpExecError("Cannot get version from the new node")

    # setup ssh on node
    logger.Info("copy ssh key to node %s" % node)
    priv_key, pub_key, _ = ssh.GetUserFiles(constants.GANETI_RUNAS)
    keyarray = []
    # order matters: call_node_add below expects the keys positionally
    keyfiles = [constants.SSH_HOST_DSA_PRIV, constants.SSH_HOST_DSA_PUB,
                constants.SSH_HOST_RSA_PRIV, constants.SSH_HOST_RSA_PUB,
                priv_key, pub_key]

    for i in keyfiles:
      f = open(i, 'r')
      try:
        keyarray.append(f.read())
      finally:
        f.close()

    result = self.rpc.call_node_add(node, keyarray[0], keyarray[1],
                                    keyarray[2],
                                    keyarray[3], keyarray[4], keyarray[5])

    if not result:
      raise errors.OpExecError("Cannot transfer ssh keys to the new node")

    # Add node to our /etc/hosts, and add key to known_hosts
    utils.AddHostToEtcHosts(new_node.name)

    if new_node.secondary_ip != new_node.primary_ip:
      # dual-homed: make sure the node actually owns the secondary ip
      if not self.rpc.call_node_has_ip_address(new_node.name,
                                               new_node.secondary_ip):
        raise errors.OpExecError("Node claims it doesn't have the secondary ip"
                                 " you gave (%s). Please fix and re-run this"
                                 " command." % new_node.secondary_ip)

    # verify from the master that ssh/hostname setup on the new node works
    node_verify_list = [self.cfg.GetMasterNode()]
    node_verify_param = {
      'nodelist': [node],
      # TODO: do a node-net-test as well?
    }

    result = self.rpc.call_node_verify(node_verify_list, node_verify_param,
                                       self.cfg.GetClusterName())
    for verifier in node_verify_list:
      if not result[verifier]:
        raise errors.OpExecError("Cannot communicate with %s's node daemon"
                                 " for remote verification" % verifier)
      if result[verifier]['nodelist']:
        for failed in result[verifier]['nodelist']:
          feedback_fn("ssh/hostname verification failed %s -> %s" %
                      (verifier, result[verifier]['nodelist'][failed]))
        raise errors.OpExecError("ssh/hostname verification failed.")

    # Distribute updated /etc/hosts and known_hosts to all nodes,
    # including the node just added
    myself = self.cfg.GetNodeInfo(self.cfg.GetMasterNode())
    dist_nodes = self.cfg.GetNodeList()
    if not self.op.readd:
      dist_nodes.append(node)
    if myself.name in dist_nodes:
      # the master already has the up-to-date files
      dist_nodes.remove(myself.name)

    logger.Debug("Copying hosts and known_hosts to all nodes")
    for fname in (constants.ETC_HOSTS, constants.SSH_KNOWN_HOSTS_FILE):
      result = self.rpc.call_upload_file(dist_nodes, fname)
      for to_node in dist_nodes:
        if not result[to_node]:
          # distribution failures are logged but not fatal
          logger.Error("copy of file %s to node %s failed" %
                       (fname, to_node))

    to_copy = []
    if constants.HT_XEN_HVM in self.cfg.GetClusterInfo().enabled_hypervisors:
      to_copy.append(constants.VNC_PASSWORD_FILE)
    for fname in to_copy:
      result = self.rpc.call_upload_file([node], fname)
      if not result[node]:
        logger.Error("could not copy file %s to node %s" % (fname, node))

    # finally register the node in config and context
    if self.op.readd:
      self.context.ReaddNode(new_node)
    else:
      self.context.AddNode(new_node)
1888 a8083063 Iustin Pop
1889 a8083063 Iustin Pop
1890 a8083063 Iustin Pop
class LUQueryClusterInfo(NoHooksLU):
  """Query cluster configuration.

  """
  _OP_REQP = []
  REQ_MASTER = False
  REQ_BGL = False

  def ExpandNames(self):
    # purely informational query, no locks required
    self.needed_locks = {}

  def CheckPrereq(self):
    """No prerequisites needed for this LU.

    """
    pass

  def Exec(self, feedback_fn):
    """Return cluster config.

    Returns a dict with the static software versions plus the live
    cluster configuration values.

    """
    cluster = self.cfg.GetClusterInfo()
    info = {}
    # compile-time/software information
    info["software_version"] = constants.RELEASE_VERSION
    info["protocol_version"] = constants.PROTOCOL_VERSION
    info["config_version"] = constants.CONFIG_VERSION
    info["os_api_version"] = constants.OS_API_VERSION
    info["export_version"] = constants.EXPORT_VERSION
    info["architecture"] = (platform.architecture()[0], platform.machine())
    # configuration of this particular cluster
    info["name"] = cluster.cluster_name
    info["master"] = cluster.master_node
    info["hypervisor_type"] = cluster.hypervisor
    info["enabled_hypervisors"] = cluster.enabled_hypervisors
    info["hvparams"] = cluster.hvparams
    info["beparams"] = cluster.beparams

    return info
1928 a8083063 Iustin Pop
1929 a8083063 Iustin Pop
1930 ae5849b5 Michael Hanselmann
class LUQueryConfigValues(NoHooksLU):
  """Return configuration values.

  """
  # output_fields is read unconditionally in ExpandNames, so it must be
  # declared as a required opcode parameter; with the previous empty
  # list a missing field surfaced as an AttributeError instead of the
  # clean OpPrereqError raised by the _OP_REQP check
  _OP_REQP = ["output_fields"]
  REQ_BGL = False

  def ExpandNames(self):
    # read-only query, no locks required
    self.needed_locks = {}

    static_fields = ["cluster_name", "master_node", "drain_flag"]
    _CheckOutputFields(static=static_fields,
                       dynamic=[],
                       selected=self.op.output_fields)

  def CheckPrereq(self):
    """No prerequisites.

    """
    pass

  def Exec(self, feedback_fn):
    """Dump a representation of the cluster config to the standard output.

    Returns the list of values for the requested output fields, in the
    order they were requested.

    """
    values = []
    for field in self.op.output_fields:
      if field == "cluster_name":
        entry = self.cfg.GetClusterName()
      elif field == "master_node":
        entry = self.cfg.GetMasterNode()
      elif field == "drain_flag":
        # the queue drain flag is just the existence of a marker file
        entry = os.path.exists(constants.JOB_QUEUE_DRAIN_FILE)
      else:
        raise errors.ParameterError(field)
      values.append(entry)
    return values
1967 a8083063 Iustin Pop
1968 a8083063 Iustin Pop
1969 a8083063 Iustin Pop
class LUActivateInstanceDisks(NoHooksLU):
  """Bring up an instance's disks.

  """
  _OP_REQP = ["instance_name"]
  REQ_BGL = False

  def ExpandNames(self):
    # lock the instance; the node locks are filled in by DeclareLocks
    # once the instance lock is held
    self._ExpandAndLockInstance()
    self.needed_locks[locking.LEVEL_NODE] = []
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE

  def DeclareLocks(self, level):
    if level == locking.LEVEL_NODE:
      self._LockInstancesNodes()

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster.

    """
    instance = self.cfg.GetInstanceInfo(self.op.instance_name)
    assert instance is not None, \
      "Cannot retrieve locked instance %s" % self.op.instance_name
    self.instance = instance

  def Exec(self, feedback_fn):
    """Activate the disks.

    Returns the device info list from _AssembleInstanceDisks, raising
    OpExecError if the assembly failed.

    """
    disks_ok, disks_info = _AssembleInstanceDisks(self, self.instance)
    if disks_ok:
      return disks_info
    raise errors.OpExecError("Cannot activate block devices")
2004 a8083063 Iustin Pop
2005 a8083063 Iustin Pop
2006 b9bddb6b Iustin Pop
def _AssembleInstanceDisks(lu, instance, ignore_secondaries=False):
  """Prepare the block devices for an instance.

  This sets up the block devices on all nodes.

  Args:
    lu: the LogicalUnit on whose behalf we execute (for cfg/rpc access)
    instance: a ganeti.objects.Instance object
    ignore_secondaries: if true, errors on secondary nodes won't result
                        in an error return from the function

  Returns:
    a tuple (disks_ok, device_info) where disks_ok is False if the
    operation failed, and device_info is the list of
    (host, instance_visible_name, node_visible_name) tuples mapping
    node devices to instance devices
  """
  device_info = []
  disks_ok = True
  iname = instance.name
  # With the two passes mechanism we try to reduce the window of
  # opportunity for the race condition of switching DRBD to primary
  # before handshaking occured, but we do not eliminate it

  # The proper fix would be to wait (with some limits) until the
  # connection has been made and drbd transitions from WFConnection
  # into any other network-connected state (Connected, SyncTarget,
  # SyncSource, etc.)

  # 1st pass, assemble on all nodes in secondary mode
  for inst_disk in instance.disks:
    for node, node_disk in inst_disk.ComputeNodeTree(instance.primary_node):
      lu.cfg.SetDiskID(node_disk, node)
      result = lu.rpc.call_blockdev_assemble(node, node_disk, iname, False)
      if not result:
        logger.Error("could not prepare block device %s on node %s"
                     " (is_primary=False, pass=1)" % (inst_disk.iv_name, node))
        # a pass-1 failure is fatal only when secondaries matter
        if not ignore_secondaries:
          disks_ok = False

  # FIXME: race condition on drbd migration to primary

  # 2nd pass, do only the primary node
  for inst_disk in instance.disks:
    for node, node_disk in inst_disk.ComputeNodeTree(instance.primary_node):
      if node != instance.primary_node:
        continue
      lu.cfg.SetDiskID(node_disk, node)
      result = lu.rpc.call_blockdev_assemble(node, node_disk, iname, True)
      if not result:
        logger.Error("could not prepare block device %s on node %s"
                     " (is_primary=True, pass=2)" % (inst_disk.iv_name, node))
        disks_ok = False
    # 'result' here is the primary node's assemble result from the loop
    # above (the primary node is always part of the disk's node tree)
    device_info.append((instance.primary_node, inst_disk.iv_name, result))

  # leave the disks configured for the primary node
  # this is a workaround that would be fixed better by
  # improving the logical/physical id handling
  for disk in instance.disks:
    lu.cfg.SetDiskID(disk, instance.primary_node)

  return disks_ok, device_info
2066 a8083063 Iustin Pop
2067 a8083063 Iustin Pop
2068 b9bddb6b Iustin Pop
def _StartInstanceDisks(lu, instance, force):
  """Start the disks of an instance.

  Assembles the instance's block devices; if that fails, the devices
  which were brought up are shut down again and OpExecError is raised.

  """
  disks_ok, _ = _AssembleInstanceDisks(lu, instance,
                                       ignore_secondaries=force)
  if disks_ok:
    return
  # assembly failed: tear down whatever was already brought up
  _ShutdownInstanceDisks(lu, instance)
  if force is not None and not force:
    logger.Error("If the message above refers to a secondary node,"
                 " you can retry the operation using '--force'.")
  raise errors.OpExecError("Disk consistency error")
2080 fe7b0351 Michael Hanselmann
2081 fe7b0351 Michael Hanselmann
2082 a8083063 Iustin Pop
class LUDeactivateInstanceDisks(NoHooksLU):
  """Shutdown an instance's disks.

  """
  _OP_REQP = ["instance_name"]
  REQ_BGL = False

  def ExpandNames(self):
    # lock the instance; the node locks are filled in by DeclareLocks
    # once the instance lock is held
    self._ExpandAndLockInstance()
    self.needed_locks[locking.LEVEL_NODE] = []
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE

  def DeclareLocks(self, level):
    if level == locking.LEVEL_NODE:
      self._LockInstancesNodes()

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster.

    """
    instance = self.cfg.GetInstanceInfo(self.op.instance_name)
    assert instance is not None, \
      "Cannot retrieve locked instance %s" % self.op.instance_name
    self.instance = instance

  def Exec(self, feedback_fn):
    """Deactivate the disks

    """
    _SafeShutdownInstanceDisks(self, self.instance)
2114 a8083063 Iustin Pop
2115 a8083063 Iustin Pop
2116 b9bddb6b Iustin Pop
def _SafeShutdownInstanceDisks(lu, instance):
  """Shutdown block devices of an instance.

  This function checks if an instance is running, before calling
  _ShutdownInstanceDisks.

  Args:
    lu: the LogicalUnit on whose behalf we execute (for rpc access)
    instance: the instance whose disks should be shut down

  Raises OpExecError if the primary node cannot be contacted or the
  instance is still running there.

  """
  ins_l = lu.rpc.call_instance_list([instance.primary_node],
                                    [instance.hypervisor])
  ins_l = ins_l[instance.primary_node]
  # an answer that is not a list signals an RPC failure; use isinstance
  # instead of the non-idiomatic 'type(x) is list' comparison
  if not isinstance(ins_l, list):
    raise errors.OpExecError("Can't contact node '%s'" %
                             instance.primary_node)

  if instance.name in ins_l:
    raise errors.OpExecError("Instance is running, can't shutdown"
                             " block devices.")

  _ShutdownInstanceDisks(lu, instance)
2135 a8083063 Iustin Pop
2136 a8083063 Iustin Pop
2137 b9bddb6b Iustin Pop
def _ShutdownInstanceDisks(lu, instance, ignore_primary=False):
  """Shutdown block devices of an instance.

  This does the shutdown on all nodes of the instance.

  A failed device shutdown makes the function return False, except
  that when ignore_primary is true, failures on the primary node are
  tolerated and do not affect the result.

  @type lu: C{LogicalUnit}
  @param lu: the logical unit on whose behalf we execute
  @param instance: the instance whose disks we should shut down
  @type ignore_primary: C{bool}
  @param ignore_primary: whether errors on the primary node should be
      ignored when computing the return value
  @rtype: C{bool}
  @return: True if all shutdowns succeeded (modulo ignore_primary)

  """
  result = True
  for disk in instance.disks:
    for node, top_disk in disk.ComputeNodeTree(instance.primary_node):
      lu.cfg.SetDiskID(top_disk, node)
      if not lu.rpc.call_blockdev_shutdown(node, top_disk):
        logger.Error("could not shutdown block device %s on node %s" %
                     (disk.iv_name, node))
        # only a primary-node error can be forgiven, and only on request
        if not ignore_primary or node != instance.primary_node:
          result = False
  return result
2156 a8083063 Iustin Pop
2157 a8083063 Iustin Pop
2158 b9bddb6b Iustin Pop
def _CheckNodeFreeMemory(lu, node, reason, requested, hypervisor):
  """Checks if a node has enough free memory.

  This function checks if a given node has the needed amount of free
  memory. In case the node has less memory or we cannot get the
  information from the node, this function raises an OpPrereqError
  exception.

  @type lu: C{LogicalUnit}
  @param lu: a logical unit from which we get configuration data
  @type node: C{str}
  @param node: the node to check
  @type reason: C{str}
  @param reason: string to use in the error message
  @type requested: C{int}
  @param requested: the amount of memory in MiB to check for
  @type hypervisor: C{str}
  @param hypervisor: the hypervisor to ask for memory stats
  @raise errors.OpPrereqError: if the node doesn't have enough memory, or
      we cannot check the node

  """
  nodeinfo = lu.rpc.call_node_info([node], lu.cfg.GetVGName(), hypervisor)
  # the RPC layer can return a non-dict on total failure, or a dict
  # missing the node's entry on partial failure; treat both as
  # "could not contact node" instead of dying with a KeyError below
  if not nodeinfo or not isinstance(nodeinfo, dict) or node not in nodeinfo:
    raise errors.OpPrereqError("Could not contact node %s for resource"
                               " information" % (node,))

  free_mem = nodeinfo[node].get('memory_free')
  if not isinstance(free_mem, int):
    raise errors.OpPrereqError("Can't compute free memory on node %s, result"
                               " was '%s'" % (node, free_mem))
  if requested > free_mem:
    raise errors.OpPrereqError("Not enough memory on node %s for %s:"
                               " needed %s MiB, available %s MiB" %
                               (node, reason, requested, free_mem))
                             (node, reason, requested, free_mem))
2193 d4f16fd9 Iustin Pop
2194 d4f16fd9 Iustin Pop
2195 a8083063 Iustin Pop
class LUStartupInstance(LogicalUnit):
  """Starts an instance.

  """
  HPATH = "instance-start"
  HTYPE = constants.HTYPE_INSTANCE
  # opcode parameters this LU requires
  _OP_REQP = ["instance_name", "force"]
  # we do fine-grained locking, no big ganeti lock needed
  REQ_BGL = False

  def ExpandNames(self):
    """Expand the instance name and declare the locks we need."""
    self._ExpandAndLockInstance()
    # node locks are filled in later (DeclareLocks), once the
    # instance lock is held and its nodes are known
    self.needed_locks[locking.LEVEL_NODE] = []
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE

  def DeclareLocks(self, level):
    """Lock the nodes of the (already locked) instance."""
    if level == locking.LEVEL_NODE:
      self._LockInstancesNodes()

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and secondary nodes of the instance.

    """
    env = {
      "FORCE": self.op.force,
      }
    env.update(_BuildInstanceHookEnvByObject(self, self.instance))
    nl = ([self.cfg.GetMasterNode(), self.instance.primary_node] +
          list(self.instance.secondary_nodes))
    return env, nl, nl

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster, that its network
    bridges exist and that the primary node has enough free memory.

    """
    self.instance = instance = self.cfg.GetInstanceInfo(self.op.instance_name)
    assert self.instance is not None, \
      "Cannot retrieve locked instance %s" % self.op.instance_name

    # backend parameters with cluster defaults filled in
    bep = self.cfg.GetClusterInfo().FillBE(instance)
    # check bridge existence
    _CheckInstanceBridgesExist(self, instance)

    _CheckNodeFreeMemory(self, instance.primary_node,
                         "starting instance %s" % instance.name,
                         bep[constants.BE_MEMORY], instance.hypervisor)

  def Exec(self, feedback_fn):
    """Start the instance.

    """
    instance = self.instance
    force = self.op.force
    extra_args = getattr(self.op, "extra_args", "")

    # record the desired state before acting on it
    self.cfg.MarkInstanceUp(instance.name)

    node_current = instance.primary_node

    _StartInstanceDisks(self, instance, force)

    if not self.rpc.call_instance_start(node_current, instance, extra_args):
      # roll back the disk activation if the instance failed to start
      _ShutdownInstanceDisks(self, instance)
      raise errors.OpExecError("Could not start instance")
2262 a8083063 Iustin Pop
2263 a8083063 Iustin Pop
2264 bf6929a2 Alexander Schreiber
class LURebootInstance(LogicalUnit):
  """Reboot an instance.

  """
  HPATH = "instance-reboot"
  HTYPE = constants.HTYPE_INSTANCE
  # opcode parameters this LU requires
  _OP_REQP = ["instance_name", "ignore_secondaries", "reboot_type"]
  # we do fine-grained locking, no big ganeti lock needed
  REQ_BGL = False

  def ExpandNames(self):
    """Validate the reboot type and declare the locks we need."""
    if self.op.reboot_type not in [constants.INSTANCE_REBOOT_SOFT,
                                   constants.INSTANCE_REBOOT_HARD,
                                   constants.INSTANCE_REBOOT_FULL]:
      raise errors.ParameterError("reboot type not in [%s, %s, %s]" %
                                  (constants.INSTANCE_REBOOT_SOFT,
                                   constants.INSTANCE_REBOOT_HARD,
                                   constants.INSTANCE_REBOOT_FULL))
    self._ExpandAndLockInstance()
    # node locks are filled in later (DeclareLocks), once the
    # instance lock is held and its nodes are known
    self.needed_locks[locking.LEVEL_NODE] = []
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE

  def DeclareLocks(self, level):
    """Lock the nodes of the (already locked) instance.

    Only a full reboot touches the secondary nodes (it restarts the
    disks everywhere), so for soft/hard reboots we lock just the
    primary node.

    """
    if level == locking.LEVEL_NODE:
      # FIX: the original computed "not constants.INSTANCE_REBOOT_FULL",
      # which negates a truthy constant and is therefore always False;
      # the intent is to lock only the primary unless doing a full reboot
      primary_only = self.op.reboot_type != constants.INSTANCE_REBOOT_FULL
      self._LockInstancesNodes(primary_only=primary_only)

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and secondary nodes of the instance.

    """
    env = {
      "IGNORE_SECONDARIES": self.op.ignore_secondaries,
      }
    env.update(_BuildInstanceHookEnvByObject(self, self.instance))
    nl = ([self.cfg.GetMasterNode(), self.instance.primary_node] +
          list(self.instance.secondary_nodes))
    return env, nl, nl

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster.

    """
    self.instance = instance = self.cfg.GetInstanceInfo(self.op.instance_name)
    assert self.instance is not None, \
      "Cannot retrieve locked instance %s" % self.op.instance_name

    # check bridge existence
    _CheckInstanceBridgesExist(self, instance)

  def Exec(self, feedback_fn):
    """Reboot the instance.

    Soft/hard reboots are delegated to the hypervisor on the primary
    node; a full reboot is implemented as shutdown + disk restart +
    start.

    """
    instance = self.instance
    ignore_secondaries = self.op.ignore_secondaries
    reboot_type = self.op.reboot_type
    extra_args = getattr(self.op, "extra_args", "")

    node_current = instance.primary_node

    if reboot_type in [constants.INSTANCE_REBOOT_SOFT,
                       constants.INSTANCE_REBOOT_HARD]:
      if not self.rpc.call_instance_reboot(node_current, instance,
                                           reboot_type, extra_args):
        raise errors.OpExecError("Could not reboot instance")
    else:
      if not self.rpc.call_instance_shutdown(node_current, instance):
        raise errors.OpExecError("could not shutdown instance for full reboot")
      _ShutdownInstanceDisks(self, instance)
      _StartInstanceDisks(self, instance, ignore_secondaries)
      if not self.rpc.call_instance_start(node_current, instance, extra_args):
        # roll back the disk activation if the instance failed to start
        _ShutdownInstanceDisks(self, instance)
        raise errors.OpExecError("Could not start instance for full reboot")

    self.cfg.MarkInstanceUp(instance.name)
2343 bf6929a2 Alexander Schreiber
2344 bf6929a2 Alexander Schreiber
2345 a8083063 Iustin Pop
class LUShutdownInstance(LogicalUnit):
  """Shutdown an instance.

  """
  HPATH = "instance-stop"
  HTYPE = constants.HTYPE_INSTANCE
  _OP_REQP = ["instance_name"]
  REQ_BGL = False

  def ExpandNames(self):
    """Expand the instance name and declare the locks we need."""
    self._ExpandAndLockInstance()
    # node locks are computed later, once the instance's nodes are known
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
    self.needed_locks[locking.LEVEL_NODE] = []

  def DeclareLocks(self, level):
    """Lock the nodes of the (already locked) instance."""
    if level != locking.LEVEL_NODE:
      return
    self._LockInstancesNodes()

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and secondary nodes of the instance.

    """
    env = _BuildInstanceHookEnvByObject(self, self.instance)
    nodes = [self.cfg.GetMasterNode(), self.instance.primary_node]
    nodes.extend(self.instance.secondary_nodes)
    return env, nodes, nodes

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster.

    """
    self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
    assert self.instance is not None, \
      "Cannot retrieve locked instance %s" % self.op.instance_name

  def Exec(self, feedback_fn):
    """Shutdown the instance.

    """
    inst = self.instance
    pnode = inst.primary_node
    # record the desired state first, then act on it
    self.cfg.MarkInstanceDown(inst.name)
    if not self.rpc.call_instance_shutdown(pnode, inst):
      logger.Error("could not shutdown instance")

    _ShutdownInstanceDisks(self, inst)
2395 a8083063 Iustin Pop
2396 a8083063 Iustin Pop
2397 fe7b0351 Michael Hanselmann
class LUReinstallInstance(LogicalUnit):
  """Reinstall an instance.

  """
  HPATH = "instance-reinstall"
  HTYPE = constants.HTYPE_INSTANCE
  # opcode parameters this LU requires (os_type is optional)
  _OP_REQP = ["instance_name"]
  # we do fine-grained locking, no big ganeti lock needed
  REQ_BGL = False

  def ExpandNames(self):
    """Expand the instance name and declare the locks we need."""
    self._ExpandAndLockInstance()
    # node locks are filled in later (DeclareLocks), once the
    # instance lock is held and its nodes are known
    self.needed_locks[locking.LEVEL_NODE] = []
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE

  def DeclareLocks(self, level):
    """Lock the nodes of the (already locked) instance."""
    if level == locking.LEVEL_NODE:
      self._LockInstancesNodes()

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and secondary nodes of the instance.

    """
    env = _BuildInstanceHookEnvByObject(self, self.instance)
    nl = ([self.cfg.GetMasterNode(), self.instance.primary_node] +
          list(self.instance.secondary_nodes))
    return env, nl, nl

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster and is not running.

    """
    instance = self.cfg.GetInstanceInfo(self.op.instance_name)
    assert instance is not None, \
      "Cannot retrieve locked instance %s" % self.op.instance_name

    if instance.disk_template == constants.DT_DISKLESS:
      raise errors.OpPrereqError("Instance '%s' has no disks" %
                                 self.op.instance_name)
    if instance.status != "down":
      raise errors.OpPrereqError("Instance '%s' is marked to be up" %
                                 self.op.instance_name)
    # double check against the hypervisor: the config may be stale
    remote_info = self.rpc.call_instance_info(instance.primary_node,
                                              instance.name,
                                              instance.hypervisor)
    if remote_info:
      raise errors.OpPrereqError("Instance '%s' is running on the node %s" %
                                 (self.op.instance_name,
                                  instance.primary_node))

    self.op.os_type = getattr(self.op, "os_type", None)
    if self.op.os_type is not None:
      # OS verification
      pnode = self.cfg.GetNodeInfo(
        self.cfg.ExpandNodeName(instance.primary_node))
      if pnode is None:
        # FIX: the original formatted "self.op.pnode" here, an attribute
        # the reinstall opcode does not have, so this branch raised
        # AttributeError instead of the intended error message
        raise errors.OpPrereqError("Primary node '%s' is unknown" %
                                   instance.primary_node)
      os_obj = self.rpc.call_os_get(pnode.name, self.op.os_type)
      if not os_obj:
        raise errors.OpPrereqError("OS '%s' not in supported OS list for"
                                   " primary node" % self.op.os_type)

    self.instance = instance

  def Exec(self, feedback_fn):
    """Reinstall the instance.

    """
    inst = self.instance

    if self.op.os_type is not None:
      feedback_fn("Changing OS to '%s'..." % self.op.os_type)
      inst.os = self.op.os_type
      self.cfg.Update(inst)

    _StartInstanceDisks(self, inst, None)
    try:
      feedback_fn("Running the instance OS create scripts...")
      if not self.rpc.call_instance_os_add(inst.primary_node, inst,
                                           "sda", "sdb"):
        raise errors.OpExecError("Could not install OS for instance %s"
                                 " on node %s" %
                                 (inst.name, inst.primary_node))
    finally:
      # always deactivate the disks, even if the OS install failed
      _ShutdownInstanceDisks(self, inst)
2486 fe7b0351 Michael Hanselmann
2487 fe7b0351 Michael Hanselmann
2488 decd5f45 Iustin Pop
class LURenameInstance(LogicalUnit):
  """Rename an instance.

  """
  HPATH = "instance-rename"
  HTYPE = constants.HTYPE_INSTANCE
  # opcode parameters this LU requires (ignore_ip is optional)
  _OP_REQP = ["instance_name", "new_name"]

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and secondary nodes of the instance.

    """
    env = _BuildInstanceHookEnvByObject(self, self.instance)
    env["INSTANCE_NEW_NAME"] = self.op.new_name
    nl = ([self.cfg.GetMasterNode(), self.instance.primary_node] +
          list(self.instance.secondary_nodes))
    return env, nl, nl

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster and is not running,
    and that the new name resolves and is not already taken.

    """
    instance = self.cfg.GetInstanceInfo(
      self.cfg.ExpandInstanceName(self.op.instance_name))
    if instance is None:
      raise errors.OpPrereqError("Instance '%s' not known" %
                                 self.op.instance_name)
    if instance.status != "down":
      raise errors.OpPrereqError("Instance '%s' is marked to be up" %
                                 self.op.instance_name)
    # double check against the hypervisor: the config may be stale
    remote_info = self.rpc.call_instance_info(instance.primary_node,
                                              instance.name,
                                              instance.hypervisor)
    if remote_info:
      raise errors.OpPrereqError("Instance '%s' is running on the node %s" %
                                 (self.op.instance_name,
                                  instance.primary_node))
    self.instance = instance

    # new name verification
    name_info = utils.HostInfo(self.op.new_name)

    # normalize the new name to its fully-qualified form
    self.op.new_name = new_name = name_info.name
    instance_list = self.cfg.GetInstanceList()
    if new_name in instance_list:
      raise errors.OpPrereqError("Instance '%s' is already in the cluster" %
                                 new_name)

    if not getattr(self.op, "ignore_ip", False):
      if utils.TcpPing(name_info.ip, constants.DEFAULT_NODED_PORT):
        raise errors.OpPrereqError("IP %s of instance %s already in use" %
                                   (name_info.ip, new_name))


  def Exec(self, feedback_fn):
    """Rename the instance.

    """
    inst = self.instance
    old_name = inst.name

    if inst.disk_template == constants.DT_FILE:
      # remember the old file storage dir before the config changes
      old_file_storage_dir = os.path.dirname(inst.disks[0].logical_id[1])

    self.cfg.RenameInstance(inst.name, self.op.new_name)
    # Change the instance lock. This is definitely safe while we hold the BGL
    self.context.glm.remove(locking.LEVEL_INSTANCE, inst.name)
    self.context.glm.add(locking.LEVEL_INSTANCE, self.op.new_name)

    # re-read the instance from the configuration after rename
    inst = self.cfg.GetInstanceInfo(self.op.new_name)

    if inst.disk_template == constants.DT_FILE:
      # file-based disks also need their storage directory renamed
      new_file_storage_dir = os.path.dirname(inst.disks[0].logical_id[1])
      result = self.rpc.call_file_storage_dir_rename(inst.primary_node,
                                                     old_file_storage_dir,
                                                     new_file_storage_dir)

      if not result:
        raise errors.OpExecError("Could not connect to node '%s' to rename"
                                 " directory '%s' to '%s' (but the instance"
                                 " has been renamed in Ganeti)" % (
                                 inst.primary_node, old_file_storage_dir,
                                 new_file_storage_dir))

      if not result[0]:
        raise errors.OpExecError("Could not rename directory '%s' to '%s'"
                                 " (but the instance has been renamed in"
                                 " Ganeti)" % (old_file_storage_dir,
                                               new_file_storage_dir))

    _StartInstanceDisks(self, inst, None)
    try:
      # a failed rename script is only logged: the config rename is
      # already done and must not be rolled back at this point
      if not self.rpc.call_instance_run_rename(inst.primary_node, inst,
                                               old_name):
        msg = ("Could not run OS rename script for instance %s on node %s"
               " (but the instance has been renamed in Ganeti)" %
               (inst.name, inst.primary_node))
        logger.Error(msg)
    finally:
      _ShutdownInstanceDisks(self, inst)
2593 decd5f45 Iustin Pop
2594 decd5f45 Iustin Pop
2595 a8083063 Iustin Pop
class LURemoveInstance(LogicalUnit):
  """Remove an instance.

  """
  HPATH = "instance-remove"
  HTYPE = constants.HTYPE_INSTANCE
  # opcode parameters this LU requires
  _OP_REQP = ["instance_name", "ignore_failures"]
  # we do fine-grained locking, no big ganeti lock needed
  REQ_BGL = False

  def ExpandNames(self):
    """Expand the instance name and declare the locks we need."""
    self._ExpandAndLockInstance()
    # node locks are filled in later (DeclareLocks), once the
    # instance lock is held and its nodes are known
    self.needed_locks[locking.LEVEL_NODE] = []
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE

  def DeclareLocks(self, level):
    """Lock the nodes of the (already locked) instance."""
    if level == locking.LEVEL_NODE:
      self._LockInstancesNodes()

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on the master node only (the node list contains just
    the master).

    """
    env = _BuildInstanceHookEnvByObject(self, self.instance)
    nl = [self.cfg.GetMasterNode()]
    return env, nl, nl

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster.

    """
    self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
    assert self.instance is not None, \
      "Cannot retrieve locked instance %s" % self.op.instance_name

  def Exec(self, feedback_fn):
    """Remove the instance.

    """
    instance = self.instance
    logger.Info("shutting down instance %s on node %s" %
                (instance.name, instance.primary_node))

    if not self.rpc.call_instance_shutdown(instance.primary_node, instance):
      # failures are fatal unless the opcode asked to ignore them
      if self.op.ignore_failures:
        feedback_fn("Warning: can't shutdown instance")
      else:
        raise errors.OpExecError("Could not shutdown instance %s on node %s" %
                                 (instance.name, instance.primary_node))

    logger.Info("removing block devices for instance %s" % instance.name)

    if not _RemoveDisks(self, instance):
      if self.op.ignore_failures:
        feedback_fn("Warning: can't remove instance's disks")
      else:
        raise errors.OpExecError("Can't remove instance's disks")

    logger.Info("removing instance %s out of cluster config" % instance.name)

    self.cfg.RemoveInstance(instance.name)
    # drop the now-stale instance lock together with the config entry
    self.remove_locks[locking.LEVEL_INSTANCE] = instance.name
2660 a8083063 Iustin Pop
2661 a8083063 Iustin Pop
2662 a8083063 Iustin Pop
class LUQueryInstances(NoHooksLU):
  """Logical unit for querying instances.

  Computes, per requested instance, the values of the requested output
  fields.  Static fields are answered from the configuration alone;
  dynamic fields ("oper_state", "oper_ram", "status") additionally
  require a live RPC query to the primary nodes.

  """
  _OP_REQP = ["output_fields", "names"]
  REQ_BGL = False

  def ExpandNames(self):
    # fields whose values must be asked from the (possibly down) nodes
    self.dynamic_fields = frozenset(["oper_state", "oper_ram", "status"])
    # one "hv/<param>" and "be/<param>" field per known parameter
    hvp = ["hv/%s" % name for name in constants.HVS_PARAMETERS]
    bep = ["be/%s" % name for name in constants.BES_PARAMETERS]
    self.static_fields = frozenset([
      "name", "os", "pnode", "snodes",
      "admin_state", "admin_ram",
      "disk_template", "ip", "mac", "bridge",
      "sda_size", "sdb_size", "vcpus", "tags",
      "network_port",
      "serial_no", "hypervisor", "hvparams",
      ] + hvp + bep)

    _CheckOutputFields(static=self.static_fields,
                       dynamic=self.dynamic_fields,
                       selected=self.op.output_fields)

    self.needed_locks = {}
    # this is a read-only query, so shared locks suffice
    self.share_locks[locking.LEVEL_INSTANCE] = 1
    self.share_locks[locking.LEVEL_NODE] = 1

    if self.op.names:
      self.wanted = _GetWantedInstances(self, self.op.names)
    else:
      self.wanted = locking.ALL_SET

    # locking is only needed if dynamic fields were requested; a purely
    # static query can be answered from the configuration alone
    self.do_locking = not self.static_fields.issuperset(self.op.output_fields)
    if self.do_locking:
      self.needed_locks[locking.LEVEL_INSTANCE] = self.wanted
      self.needed_locks[locking.LEVEL_NODE] = []
      self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE

  def DeclareLocks(self, level):
    # node locks are derived from the instance locks acquired earlier
    if level == locking.LEVEL_NODE and self.do_locking:
      self._LockInstancesNodes()

  def CheckPrereq(self):
    """Check prerequisites.

    """
    pass

  def Exec(self, feedback_fn):
    """Computes the list of nodes and their attributes.

    """
    all_info = self.cfg.GetAllInstancesInfo()
    if self.do_locking:
      # report exactly the instances we hold locks on
      instance_names = self.acquired_locks[locking.LEVEL_INSTANCE]
    elif self.wanted != locking.ALL_SET:
      # no locking but explicit names: check they still exist
      instance_names = self.wanted
      missing = set(instance_names).difference(all_info.keys())
      if missing:
        raise errors.OpExecError(
          "Some instances were removed before retrieving their data: %s"
          % missing)
    else:
      instance_names = all_info.keys()

    instance_names = utils.NiceSort(instance_names)
    instance_list = [all_info[iname] for iname in instance_names]

    # begin data gathering

    nodes = frozenset([inst.primary_node for inst in instance_list])
    hv_list = list(set([inst.hypervisor for inst in instance_list]))

    bad_nodes = []
    if self.dynamic_fields.intersection(self.op.output_fields):
      # dynamic fields requested: query the primary nodes for their
      # live instance information
      live_data = {}
      node_data = self.rpc.call_all_instances_info(nodes, hv_list)
      for name in nodes:
        result = node_data[name]
        if result:
          live_data.update(result)
        elif result == False:
          # the node failed to answer; remember it so the dynamic
          # fields of its instances can report an error
          bad_nodes.append(name)
        # else no instance is alive
    else:
      live_data = dict([(name, {}) for name in instance_names])

    # end data gathering

    HVPREFIX = "hv/"
    BEPREFIX = "be/"
    output = []
    for instance in instance_list:
      iout = []
      # hypervisor/backend parameters with cluster defaults filled in
      i_hv = self.cfg.GetClusterInfo().FillHV(instance)
      i_be = self.cfg.GetClusterInfo().FillBE(instance)
      for field in self.op.output_fields:
        if field == "name":
          val = instance.name
        elif field == "os":
          val = instance.os
        elif field == "pnode":
          val = instance.primary_node
        elif field == "snodes":
          val = list(instance.secondary_nodes)
        elif field == "admin_state":
          val = (instance.status != "down")
        elif field == "oper_state":
          if instance.primary_node in bad_nodes:
            val = None
          else:
            val = bool(live_data.get(instance.name))
        elif field == "status":
          # combined admin + operational state; mismatches between the
          # two are reported as ERROR_up/ERROR_down
          if instance.primary_node in bad_nodes:
            val = "ERROR_nodedown"
          else:
            running = bool(live_data.get(instance.name))
            if running:
              if instance.status != "down":
                val = "running"
              else:
                val = "ERROR_up"
            else:
              if instance.status != "down":
                val = "ERROR_down"
              else:
                val = "ADMIN_down"
        elif field == "oper_ram":
          if instance.primary_node in bad_nodes:
            val = None
          elif instance.name in live_data:
            val = live_data[instance.name].get("memory", "?")
          else:
            val = "-"
        elif field == "disk_template":
          val = instance.disk_template
        elif field == "ip":
          val = instance.nics[0].ip
        elif field == "bridge":
          val = instance.nics[0].bridge
        elif field == "mac":
          val = instance.nics[0].mac
        elif field == "sda_size" or field == "sdb_size":
          # the field name prefix ("sda"/"sdb") selects the disk
          disk = instance.FindDisk(field[:3])
          if disk is None:
            val = None
          else:
            val = disk.size
        elif field == "tags":
          val = list(instance.GetTags())
        elif field == "serial_no":
          val = instance.serial_no
        elif field == "network_port":
          val = instance.network_port
        elif field == "hypervisor":
          val = instance.hypervisor
        elif field == "hvparams":
          val = i_hv
        elif (field.startswith(HVPREFIX) and
              field[len(HVPREFIX):] in constants.HVS_PARAMETERS):
          val = i_hv.get(field[len(HVPREFIX):], None)
        elif field == "beparams":
          val = i_be
        elif (field.startswith(BEPREFIX) and
              field[len(BEPREFIX):] in constants.BES_PARAMETERS):
          val = i_be.get(field[len(BEPREFIX):], None)
        else:
          raise errors.ParameterError(field)
        iout.append(val)
      output.append(iout)

    return output
2835 a8083063 Iustin Pop
2836 a8083063 Iustin Pop
2837 a8083063 Iustin Pop
class LUFailoverInstance(LogicalUnit):
  """Failover an instance.

  Moves a network-mirrored instance to its secondary node by shutting
  it down on the current primary and (if it was up) starting it on the
  secondary.

  """
  HPATH = "instance-failover"
  HTYPE = constants.HTYPE_INSTANCE
  _OP_REQP = ["instance_name", "ignore_consistency"]
  REQ_BGL = False

  def ExpandNames(self):
    self._ExpandAndLockInstance()
    # node locks are computed later, once the instance's nodes are known
    self.needed_locks[locking.LEVEL_NODE] = []
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE

  def DeclareLocks(self, level):
    if level == locking.LEVEL_NODE:
      self._LockInstancesNodes()

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and secondary nodes of the instance.

    """
    env = {
      "IGNORE_CONSISTENCY": self.op.ignore_consistency,
      }
    env.update(_BuildInstanceHookEnvByObject(self, self.instance))
    nl = [self.cfg.GetMasterNode()] + list(self.instance.secondary_nodes)
    return env, nl, nl

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster.

    """
    self.instance = instance = self.cfg.GetInstanceInfo(self.op.instance_name)
    assert self.instance is not None, \
      "Cannot retrieve locked instance %s" % self.op.instance_name

    # backend parameters with cluster defaults filled in
    bep = self.cfg.GetClusterInfo().FillBE(instance)
    if instance.disk_template not in constants.DTS_NET_MIRROR:
      raise errors.OpPrereqError("Instance's disk layout is not"
                                 " network mirrored, cannot failover.")

    secondary_nodes = instance.secondary_nodes
    if not secondary_nodes:
      raise errors.ProgrammerError("no secondary node but using "
                                   "a mirrored disk template")

    target_node = secondary_nodes[0]
    # check memory requirements on the secondary node
    _CheckNodeFreeMemory(self, target_node, "failing over instance %s" %
                         instance.name, bep[constants.BE_MEMORY],
                         instance.hypervisor)

    # check bridge existance
    brlist = [nic.bridge for nic in instance.nics]
    if not self.rpc.call_bridges_exist(target_node, brlist):
      raise errors.OpPrereqError("One or more target bridges %s does not"
                                 " exist on destination node '%s'" %
                                 (brlist, target_node))

  def Exec(self, feedback_fn):
    """Failover an instance.

    The failover is done by shutting it down on its present node and
    starting it on the secondary.

    """
    instance = self.instance

    source_node = instance.primary_node
    target_node = instance.secondary_nodes[0]

    feedback_fn("* checking disk consistency between source and target")
    for dev in instance.disks:
      # for drbd, these are drbd over lvm
      if not _CheckDiskConsistency(self, dev, target_node, False):
        # a degraded target disk only aborts the failover when the
        # instance is up and the caller did not override consistency
        if instance.status == "up" and not self.op.ignore_consistency:
          raise errors.OpExecError("Disk %s is degraded on target node,"
                                   " aborting failover." % dev.iv_name)

    feedback_fn("* shutting down instance on source node")
    logger.Info("Shutting down instance %s on node %s" %
                (instance.name, source_node))

    if not self.rpc.call_instance_shutdown(source_node, instance):
      if self.op.ignore_consistency:
        logger.Error("Could not shutdown instance %s on node %s. Proceeding"
                     " anyway. Please make sure node %s is down"  %
                     (instance.name, source_node, source_node))
      else:
        raise errors.OpExecError("Could not shutdown instance %s on node %s" %
                                 (instance.name, source_node))

    feedback_fn("* deactivating the instance's disks on source node")
    if not _ShutdownInstanceDisks(self, instance, ignore_primary=True):
      raise errors.OpExecError("Can't shut down the instance's disks.")

    # the secondary becomes the new primary
    instance.primary_node = target_node
    # distribute new instance config to the other nodes
    self.cfg.Update(instance)

    # Only start the instance if it's marked as up
    if instance.status == "up":
      feedback_fn("* activating the instance's disks on target node")
      logger.Info("Starting instance %s on node %s" %
                  (instance.name, target_node))

      disks_ok, dummy = _AssembleInstanceDisks(self, instance,
                                               ignore_secondaries=True)
      if not disks_ok:
        # clean up half-activated disks before failing
        _ShutdownInstanceDisks(self, instance)
        raise errors.OpExecError("Can't activate the instance's disks")

      feedback_fn("* starting the instance on the target node")
      if not self.rpc.call_instance_start(target_node, instance, None):
        _ShutdownInstanceDisks(self, instance)
        raise errors.OpExecError("Could not start instance %s on node %s." %
                                 (instance.name, target_node))
2959 a8083063 Iustin Pop
2960 a8083063 Iustin Pop
2961 b9bddb6b Iustin Pop
def _CreateBlockDevOnPrimary(lu, node, instance, device, info):
2962 a8083063 Iustin Pop
  """Create a tree of block devices on the primary node.
2963 a8083063 Iustin Pop

2964 a8083063 Iustin Pop
  This always creates all devices.
2965 a8083063 Iustin Pop

2966 a8083063 Iustin Pop
  """
2967 a8083063 Iustin Pop
  if device.children:
2968 a8083063 Iustin Pop
    for child in device.children:
2969 b9bddb6b Iustin Pop
      if not _CreateBlockDevOnPrimary(lu, node, instance, child, info):
2970 a8083063 Iustin Pop
        return False
2971 a8083063 Iustin Pop
2972 b9bddb6b Iustin Pop
  lu.cfg.SetDiskID(device, node)
2973 72737a7f Iustin Pop
  new_id = lu.rpc.call_blockdev_create(node, device, device.size,
2974 72737a7f Iustin Pop
                                       instance.name, True, info)
2975 a8083063 Iustin Pop
  if not new_id:
2976 a8083063 Iustin Pop
    return False
2977 a8083063 Iustin Pop
  if device.physical_id is None:
2978 a8083063 Iustin Pop
    device.physical_id = new_id
2979 a8083063 Iustin Pop
  return True
2980 a8083063 Iustin Pop
2981 a8083063 Iustin Pop
2982 b9bddb6b Iustin Pop
def _CreateBlockDevOnSecondary(lu, node, instance, device, force, info):
2983 a8083063 Iustin Pop
  """Create a tree of block devices on a secondary node.
2984 a8083063 Iustin Pop

2985 a8083063 Iustin Pop
  If this device type has to be created on secondaries, create it and
2986 a8083063 Iustin Pop
  all its children.
2987 a8083063 Iustin Pop

2988 a8083063 Iustin Pop
  If not, just recurse to children keeping the same 'force' value.
2989 a8083063 Iustin Pop

2990 a8083063 Iustin Pop
  """
2991 a8083063 Iustin Pop
  if device.CreateOnSecondary():
2992 a8083063 Iustin Pop
    force = True
2993 a8083063 Iustin Pop
  if device.children:
2994 a8083063 Iustin Pop
    for child in device.children:
2995 b9bddb6b Iustin Pop
      if not _CreateBlockDevOnSecondary(lu, node, instance,
2996 3f78eef2 Iustin Pop
                                        child, force, info):
2997 a8083063 Iustin Pop
        return False
2998 a8083063 Iustin Pop
2999 a8083063 Iustin Pop
  if not force:
3000 a8083063 Iustin Pop
    return True
3001 b9bddb6b Iustin Pop
  lu.cfg.SetDiskID(device, node)
3002 72737a7f Iustin Pop
  new_id = lu.rpc.call_blockdev_create(node, device, device.size,
3003 72737a7f Iustin Pop
                                       instance.name, False, info)
3004 a8083063 Iustin Pop
  if not new_id:
3005 a8083063 Iustin Pop
    return False
3006 a8083063 Iustin Pop
  if device.physical_id is None:
3007 a8083063 Iustin Pop
    device.physical_id = new_id
3008 a8083063 Iustin Pop
  return True
3009 a8083063 Iustin Pop
3010 a8083063 Iustin Pop
3011 b9bddb6b Iustin Pop
def _GenerateUniqueNames(lu, exts):
3012 923b1523 Iustin Pop
  """Generate a suitable LV name.
3013 923b1523 Iustin Pop

3014 923b1523 Iustin Pop
  This will generate a logical volume name for the given instance.
3015 923b1523 Iustin Pop

3016 923b1523 Iustin Pop
  """
3017 923b1523 Iustin Pop
  results = []
3018 923b1523 Iustin Pop
  for val in exts:
3019 b9bddb6b Iustin Pop
    new_id = lu.cfg.GenerateUniqueID()
3020 923b1523 Iustin Pop
    results.append("%s%s" % (new_id, val))
3021 923b1523 Iustin Pop
  return results
3022 923b1523 Iustin Pop
3023 923b1523 Iustin Pop
3024 b9bddb6b Iustin Pop
def _GenerateDRBD8Branch(lu, primary, secondary, size, names, iv_name,
                         p_minor, s_minor):
  """Build one DRBD8 disk together with its two LV children.

  A network port and a shared secret are allocated from the cluster
  configuration; the data LV (of the requested size) and the fixed-size
  (128) metadata LV are created as children of the DRBD8 device that
  links the two given nodes/minors.

  """
  port = lu.cfg.AllocatePort()
  vg = lu.cfg.GetVGName()
  secret = lu.cfg.GenerateDRBDSecret()
  data_child = objects.Disk(dev_type=constants.LD_LV, size=size,
                            logical_id=(vg, names[0]))
  meta_child = objects.Disk(dev_type=constants.LD_LV, size=128,
                            logical_id=(vg, names[1]))
  return objects.Disk(dev_type=constants.LD_DRBD8, size=size,
                      logical_id=(primary, secondary, port,
                                  p_minor, s_minor, secret),
                      children=[data_child, meta_child],
                      iv_name=iv_name)
3043 a1f445d3 Iustin Pop
3044 7c0d6283 Michael Hanselmann
3045 b9bddb6b Iustin Pop
def _GenerateDiskTemplate(lu, template_name,
                          instance_name, primary_node,
                          secondary_nodes, disk_sz, swap_sz,
                          file_storage_dir, file_driver):
  """Generate the entire (sda/sdb) disk layout for one disk template.

  Returns the list of disk objects appropriate for the given template;
  raises ProgrammerError when the secondary-node list does not match
  the template's requirements or the template is unknown.

  """
  #TODO: compute space requirements

  vgname = lu.cfg.GetVGName()

  if template_name == constants.DT_DISKLESS:
    return []

  if template_name == constants.DT_PLAIN:
    if len(secondary_nodes) != 0:
      raise errors.ProgrammerError("Wrong template configuration")
    lv_names = _GenerateUniqueNames(lu, [".sda", ".sdb"])
    return [objects.Disk(dev_type=constants.LD_LV, size=disk_sz,
                         logical_id=(vgname, lv_names[0]),
                         iv_name="sda"),
            objects.Disk(dev_type=constants.LD_LV, size=swap_sz,
                         logical_id=(vgname, lv_names[1]),
                         iv_name="sdb")]

  if template_name == constants.DT_DRBD8:
    if len(secondary_nodes) != 1:
      raise errors.ProgrammerError("Wrong template configuration")
    remote_node = secondary_nodes[0]
    # one minor per drbd device per node, all allocated in one call
    (minor_pa, minor_pb,
     minor_sa, minor_sb) = lu.cfg.AllocateDRBDMinor(
      [primary_node, primary_node, remote_node, remote_node], instance_name)
    lv_names = _GenerateUniqueNames(lu, [".sda_data", ".sda_meta",
                                         ".sdb_data", ".sdb_meta"])
    sda = _GenerateDRBD8Branch(lu, primary_node, remote_node,
                               disk_sz, lv_names[0:2], "sda",
                               minor_pa, minor_sa)
    sdb = _GenerateDRBD8Branch(lu, primary_node, remote_node,
                               swap_sz, lv_names[2:4], "sdb",
                               minor_pb, minor_sb)
    return [sda, sdb]

  if template_name == constants.DT_FILE:
    if len(secondary_nodes) != 0:
      raise errors.ProgrammerError("Wrong template configuration")
    return [objects.Disk(dev_type=constants.LD_FILE, size=disk_sz,
                         iv_name="sda",
                         logical_id=(file_driver,
                                     "%s/sda" % file_storage_dir)),
            objects.Disk(dev_type=constants.LD_FILE, size=swap_sz,
                         iv_name="sdb",
                         logical_id=(file_driver,
                                     "%s/sdb" % file_storage_dir))]

  raise errors.ProgrammerError("Invalid disk template '%s'" % template_name)
3100 a8083063 Iustin Pop
3101 a8083063 Iustin Pop
3102 a0c3fea1 Michael Hanselmann
def _GetInstanceInfoText(instance):
3103 3ecf6786 Iustin Pop
  """Compute that text that should be added to the disk's metadata.
3104 3ecf6786 Iustin Pop

3105 3ecf6786 Iustin Pop
  """
3106 a0c3fea1 Michael Hanselmann
  return "originstname+%s" % instance.name
3107 a0c3fea1 Michael Hanselmann
3108 a0c3fea1 Michael Hanselmann
3109 b9bddb6b Iustin Pop
def _CreateDisks(lu, instance):
  """Create every disk of an instance.

  This abstracts away some work from AddInstance.  For file-based
  instances the file storage directory is created first; each disk is
  then created on all secondary nodes and finally on the primary node.
  The first failure aborts the process.

  Args:
    instance: the instance object

  Returns:
    True or False showing the success of the creation process

  """
  info = _GetInstanceInfoText(instance)

  if instance.disk_template == constants.DT_FILE:
    # the directory holding the file-based disks must exist beforehand
    file_storage_dir = os.path.dirname(instance.disks[0].logical_id[1])
    result = lu.rpc.call_file_storage_dir_create(instance.primary_node,
                                                 file_storage_dir)
    if not result:
      logger.Error("Could not connect to node '%s'" % instance.primary_node)
      return False
    if not result[0]:
      logger.Error("failed to create directory '%s'" % file_storage_dir)
      return False

  for disk in instance.disks:
    logger.Info("creating volume %s for instance %s" %
                (disk.iv_name, instance.name))
    #HARDCODE
    for snode in instance.secondary_nodes:
      if not _CreateBlockDevOnSecondary(lu, snode, instance,
                                        disk, False, info):
        logger.Error("failed to create volume %s (%s) on secondary node %s!" %
                     (disk.iv_name, disk, snode))
        return False
    #HARDCODE
    if not _CreateBlockDevOnPrimary(lu, instance.primary_node,
                                    instance, disk, info):
      logger.Error("failed to create volume %s on primary!" % disk.iv_name)
      return False

  return True
3154 a8083063 Iustin Pop
3155 a8083063 Iustin Pop
3156 b9bddb6b Iustin Pop
def _RemoveDisks(lu, instance):
  """Remove all disks of an instance, best-effort.

  This abstracts away some work from `AddInstance()` and
  `RemoveInstance()`.  Every block device of the instance is removed on
  every node it lives on; failures are logged but do not stop the
  removal of the remaining devices (compare with `_CreateDisks()`).
  For file-based instances the file storage directory is removed too.

  Args:
    instance: the instance object

  Returns:
    True or False showing the success of the removal process

  """
  logger.Info("removing block devices for instance %s" % instance.name)

  all_ok = True
  for device in instance.disks:
    for node, disk in device.ComputeNodeTree(instance.primary_node):
      lu.cfg.SetDiskID(disk, node)
      if not lu.rpc.call_blockdev_remove(node, disk):
        logger.Error("could not remove block device %s on node %s,"
                     " continuing anyway" %
                     (device.iv_name, node))
        all_ok = False

  if instance.disk_template == constants.DT_FILE:
    file_storage_dir = os.path.dirname(instance.disks[0].logical_id[1])
    if not lu.rpc.call_file_storage_dir_remove(instance.primary_node,
                                               file_storage_dir):
      logger.Error("could not remove directory '%s'" % file_storage_dir)
      all_ok = False

  return all_ok
3191 a8083063 Iustin Pop
3192 a8083063 Iustin Pop
3193 e2fe6369 Iustin Pop
def _ComputeDiskSize(disk_template, disk_size, swap_size):
  """Compute disk size requirements in the volume group

  This is currently hard-coded for the two-drive layout.

  """
  # Space the data + swap volumes consume inside the volume group;
  # diskless and file-based templates need no LVM space at all (None).
  lvm_size = disk_size + swap_size
  # 256 MB are added for drbd metadata, 128MB for each drbd device
  req_size_dict = {
    constants.DT_DISKLESS: None,
    constants.DT_PLAIN: lvm_size,
    constants.DT_DRBD8: lvm_size + 256,
    constants.DT_FILE: None,
  }

  try:
    return req_size_dict[disk_template]
  except KeyError:
    raise errors.ProgrammerError("Disk template '%s' size requirement"
                                 " is unknown" %  disk_template)
3213 e2fe6369 Iustin Pop
3214 e2fe6369 Iustin Pop
3215 74409b12 Iustin Pop
def _CheckHVParams(lu, nodenames, hvname, hvparams):
  """Hypervisor parameter validation.

  This function abstracts the hypervisor parameter validation to be
  used in both instance create and instance modify.

  @type lu: L{LogicalUnit}
  @param lu: the logical unit for which we check
  @type nodenames: list
  @param nodenames: the list of nodes on which we should check
  @type hvname: string
  @param hvname: the name of the hypervisor we should use
  @type hvparams: dict
  @param hvparams: the parameters which we need to check
  @raise errors.OpPrereqError: if the parameters are not valid

  """
  # ask every involved node to validate the parameters; the answer per
  # node is expected to be a (status, message) tuple/list
  hvinfo = lu.rpc.call_hypervisor_validate_params(nodenames, hvname, hvparams)
  for node in nodenames:
    node_result = hvinfo.get(node, None)
    # a missing or malformed answer means the node could not be queried
    if not node_result or not isinstance(node_result, (tuple, list)):
      raise errors.OpPrereqError("Cannot get current information"
                                 " from node '%s' (%s)" % (node, node_result))
    if not node_result[0]:
      raise errors.OpPrereqError("Hypervisor parameter validation failed:"
                                 " %s" % node_result[1])
3243 74409b12 Iustin Pop
3244 74409b12 Iustin Pop
3245 a8083063 Iustin Pop
class LUCreateInstance(LogicalUnit):
  """Create an instance.

  Handles both fresh creation (constants.INSTANCE_CREATE) and restoring
  from an export (constants.INSTANCE_IMPORT); the target node(s) can be
  given explicitly or chosen by an iallocator script.

  """
  HPATH = "instance-add"
  HTYPE = constants.HTYPE_INSTANCE
  # opcode attributes that must be present before this LU runs
  _OP_REQP = ["instance_name", "disk_size",
              "disk_template", "swap_size", "mode", "start",
              "wait_for_sync", "ip_check", "mac",
              "hvparams", "beparams"]
  REQ_BGL = False

  def _ExpandNode(self, node):
    """Expands and checks one node name.

    Returns the fully-qualified node name, raising OpPrereqError if the
    node is not known to the configuration.

    """
    node_full = self.cfg.ExpandNodeName(node)
    if node_full is None:
      raise errors.OpPrereqError("Unknown node %s" % node)
    return node_full

  def ExpandNames(self):
    """ExpandNames for CreateInstance.

    Figure out the right locks for instance creation.

    """
    self.needed_locks = {}

    # set optional parameters to none if they don't exist
    for attr in ["pnode", "snode", "iallocator", "hypervisor"]:
      if not hasattr(self.op, attr):
        setattr(self.op, attr, None)

    # cheap checks, mostly valid constants given

    # verify creation mode
    if self.op.mode not in (constants.INSTANCE_CREATE,
                            constants.INSTANCE_IMPORT):
      raise errors.OpPrereqError("Invalid instance creation mode '%s'" %
                                 self.op.mode)

    # disk template and mirror node verification
    if self.op.disk_template not in constants.DISK_TEMPLATES:
      raise errors.OpPrereqError("Invalid disk template name")

    # default to the cluster-wide hypervisor when none was requested
    if self.op.hypervisor is None:
      self.op.hypervisor = self.cfg.GetHypervisorType()

    cluster = self.cfg.GetClusterInfo()
    enabled_hvs = cluster.enabled_hypervisors
    if self.op.hypervisor not in enabled_hvs:
      raise errors.OpPrereqError("Selected hypervisor (%s) not enabled in the"
                                 " cluster (%s)" % (self.op.hypervisor,
                                  ",".join(enabled_hvs)))

    # check hypervisor parameter syntax (locally)

    # merge the opcode hvparams on top of the cluster defaults before
    # syntax-checking them
    filled_hvp = cluster.FillDict(cluster.hvparams[self.op.hypervisor],
                                  self.op.hvparams)
    hv_type = hypervisor.GetHypervisor(self.op.hypervisor)
    hv_type.CheckParameterSyntax(filled_hvp)

    # fill and remember the beparams dict
    self.be_full = cluster.FillDict(cluster.beparams[constants.BEGR_DEFAULT],
                                    self.op.beparams)

    #### instance parameters check

    # instance name verification
    hostname1 = utils.HostInfo(self.op.instance_name)
    self.op.instance_name = instance_name = hostname1.name

    # this is just a preventive check, but someone might still add this
    # instance in the meantime, and creation will fail at lock-add time
    if instance_name in self.cfg.GetInstanceList():
      raise errors.OpPrereqError("Instance '%s' is already in the cluster" %
                                 instance_name)

    self.add_locks[locking.LEVEL_INSTANCE] = instance_name

    # ip validity checks: "none" disables the IP, "auto" takes the
    # resolved address of the instance name
    ip = getattr(self.op, "ip", None)
    if ip is None or ip.lower() == "none":
      inst_ip = None
    elif ip.lower() == "auto":
      inst_ip = hostname1.ip
    else:
      if not utils.IsValidIP(ip):
        raise errors.OpPrereqError("given IP address '%s' doesn't look"
                                   " like a valid IP" % ip)
      inst_ip = ip
    self.inst_ip = self.op.ip = inst_ip
    # used in CheckPrereq for ip ping check
    self.check_ip = hostname1.ip

    # MAC address verification ("auto" is resolved later, in Exec)
    if self.op.mac != "auto":
      if not utils.IsValidMac(self.op.mac.lower()):
        raise errors.OpPrereqError("invalid MAC address specified: %s" %
                                   self.op.mac)

    # file storage checks
    if (self.op.file_driver and
        not self.op.file_driver in constants.FILE_DRIVER):
      raise errors.OpPrereqError("Invalid file driver name '%s'" %
                                 self.op.file_driver)

    # NOTE(review): this fires when the path IS absolute -- the
    # user-supplied dir must be relative, as it is joined under the
    # cluster file storage dir in Exec; the message wording is misleading
    if self.op.file_storage_dir and os.path.isabs(self.op.file_storage_dir):
      raise errors.OpPrereqError("File storage directory path not absolute")

    ### Node/iallocator related checks
    if [self.op.iallocator, self.op.pnode].count(None) != 1:
      raise errors.OpPrereqError("One and only one of iallocator and primary"
                                 " node must be given")

    if self.op.iallocator:
      # the allocator will pick nodes in CheckPrereq, so lock them all
      self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
    else:
      self.op.pnode = self._ExpandNode(self.op.pnode)
      nodelist = [self.op.pnode]
      if self.op.snode is not None:
        self.op.snode = self._ExpandNode(self.op.snode)
        nodelist.append(self.op.snode)
      self.needed_locks[locking.LEVEL_NODE] = nodelist

    # in case of import lock the source node too
    if self.op.mode == constants.INSTANCE_IMPORT:
      src_node = getattr(self.op, "src_node", None)
      src_path = getattr(self.op, "src_path", None)

      if src_node is None or src_path is None:
        raise errors.OpPrereqError("Importing an instance requires source"
                                   " node and path options")

      if not os.path.isabs(src_path):
        raise errors.OpPrereqError("The source path must be absolute")

      self.op.src_node = src_node = self._ExpandNode(src_node)
      if self.needed_locks[locking.LEVEL_NODE] is not locking.ALL_SET:
        self.needed_locks[locking.LEVEL_NODE].append(src_node)

    else: # INSTANCE_CREATE
      if getattr(self.op, "os_type", None) is None:
        raise errors.OpPrereqError("No guest OS specified")

  def _RunAllocator(self):
    """Run the allocator based on input opcode.

    On success this sets self.op.pnode (and self.op.snode for two-node
    disk templates) from the allocator's answer.

    """
    disks = [{"size": self.op.disk_size, "mode": "w"},
             {"size": self.op.swap_size, "mode": "w"}]
    nics = [{"mac": self.op.mac, "ip": getattr(self.op, "ip", None),
             "bridge": self.op.bridge}]
    ial = IAllocator(self,
                     mode=constants.IALLOCATOR_MODE_ALLOC,
                     name=self.op.instance_name,
                     disk_template=self.op.disk_template,
                     tags=[],
                     os=self.op.os_type,
                     vcpus=self.be_full[constants.BE_VCPUS],
                     mem_size=self.be_full[constants.BE_MEMORY],
                     disks=disks,
                     nics=nics,
                     )

    ial.Run(self.op.iallocator)

    if not ial.success:
      raise errors.OpPrereqError("Can't compute nodes using"
                                 " iallocator '%s': %s" % (self.op.iallocator,
                                                           ial.info))
    # sanity-check the allocator answer before trusting it
    if len(ial.nodes) != ial.required_nodes:
      raise errors.OpPrereqError("iallocator '%s' returned invalid number"
                                 " of nodes (%s), required %s" %
                                 (self.op.iallocator, len(ial.nodes),
                                  ial.required_nodes))
    self.op.pnode = ial.nodes[0]
    logger.ToStdout("Selected nodes for the instance: %s" %
                    (", ".join(ial.nodes),))
    logger.Info("Selected nodes for instance %s via iallocator %s: %s" %
                (self.op.instance_name, self.op.iallocator, ial.nodes))
    if ial.required_nodes == 2:
      self.op.snode = ial.nodes[1]

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and secondary nodes of the instance.

    """
    env = {
      "INSTANCE_DISK_TEMPLATE": self.op.disk_template,
      "INSTANCE_DISK_SIZE": self.op.disk_size,
      "INSTANCE_SWAP_SIZE": self.op.swap_size,
      "INSTANCE_ADD_MODE": self.op.mode,
      }
    if self.op.mode == constants.INSTANCE_IMPORT:
      env["INSTANCE_SRC_NODE"] = self.op.src_node
      env["INSTANCE_SRC_PATH"] = self.op.src_path
      env["INSTANCE_SRC_IMAGE"] = self.src_image

    env.update(_BuildInstanceHookEnv(name=self.op.instance_name,
      primary_node=self.op.pnode,
      secondary_nodes=self.secondaries,
      status=self.instance_status,
      os_type=self.op.os_type,
      memory=self.be_full[constants.BE_MEMORY],
      vcpus=self.be_full[constants.BE_VCPUS],
      nics=[(self.inst_ip, self.op.bridge, self.op.mac)],
    ))

    # hooks run on the master node plus the instance's own nodes
    nl = ([self.cfg.GetMasterNode(), self.op.pnode] +
          self.secondaries)
    return env, nl, nl

  def CheckPrereq(self):
    """Check prerequisites.

    """
    # lvm-based templates need a volume group on the cluster
    if (not self.cfg.GetVGName() and
        self.op.disk_template not in constants.DTS_NOT_LVM):
      raise errors.OpPrereqError("Cluster does not support lvm-based"
                                 " instances")

    if self.op.mode == constants.INSTANCE_IMPORT:
      src_node = self.op.src_node
      src_path = self.op.src_path

      # export_info behaves like a ConfigParser-style object (sections/keys)
      export_info = self.rpc.call_export_info(src_node, src_path)

      if not export_info:
        raise errors.OpPrereqError("No export found in dir %s" % src_path)

      if not export_info.has_section(constants.INISECT_EXP):
        raise errors.ProgrammerError("Corrupted export config")

      ei_version = export_info.get(constants.INISECT_EXP, 'version')
      if (int(ei_version) != constants.EXPORT_VERSION):
        raise errors.OpPrereqError("Wrong export version %s (wanted %d)" %
                                   (ei_version, constants.EXPORT_VERSION))

      if int(export_info.get(constants.INISECT_INS, 'disk_count')) > 1:
        raise errors.OpPrereqError("Can't import instance with more than"
                                   " one data disk")

      # FIXME: are the old os-es, disk sizes, etc. useful?
      self.op.os_type = export_info.get(constants.INISECT_EXP, 'os')
      diskimage = os.path.join(src_path, export_info.get(constants.INISECT_INS,
                                                         'disk0_dump'))
      self.src_image = diskimage

    # ip ping checks (we use the same ip that was resolved in ExpandNames)

    if self.op.start and not self.op.ip_check:
      raise errors.OpPrereqError("Cannot ignore IP address conflicts when"
                                 " adding an instance in start mode")

    if self.op.ip_check:
      # a reachable IP means the name/address is already in use somewhere
      if utils.TcpPing(self.check_ip, constants.DEFAULT_NODED_PORT):
        raise errors.OpPrereqError("IP %s of instance %s already in use" %
                                   (self.check_ip, self.op.instance_name))

    # bridge verification: fall back to the cluster default bridge
    bridge = getattr(self.op, "bridge", None)
    if bridge is None:
      self.op.bridge = self.cfg.GetDefBridge()
    else:
      self.op.bridge = bridge

    #### allocator run

    if self.op.iallocator is not None:
      self._RunAllocator()

    #### node related checks

    # check primary node
    self.pnode = pnode = self.cfg.GetNodeInfo(self.op.pnode)
    assert self.pnode is not None, \
      "Cannot retrieve locked node %s" % self.op.pnode
    self.secondaries = []

    # mirror node verification
    if self.op.disk_template in constants.DTS_NET_MIRROR:
      if self.op.snode is None:
        raise errors.OpPrereqError("The networked disk templates need"
                                   " a mirror node")
      if self.op.snode == pnode.name:
        raise errors.OpPrereqError("The secondary node cannot be"
                                   " the primary node.")
      self.secondaries.append(self.op.snode)

    nodenames = [pnode.name] + self.secondaries

    # None means the template consumes no volume-group space
    req_size = _ComputeDiskSize(self.op.disk_template,
                                self.op.disk_size, self.op.swap_size)

    # Check lv size requirements
    if req_size is not None:
      nodeinfo = self.rpc.call_node_info(nodenames, self.cfg.GetVGName(),
                                         self.op.hypervisor)
      for node in nodenames:
        info = nodeinfo.get(node, None)
        if not info:
          raise errors.OpPrereqError("Cannot get current information"
                                     " from node '%s'" % node)
        # NOTE(review): a non-int vg_free (e.g. None) is treated as a
        # query failure; presumably the RPC always returns an int in MB
        # -- TODO confirm
        vg_free = info.get('vg_free', None)
        if not isinstance(vg_free, int):
          raise errors.OpPrereqError("Can't compute free disk space on"
                                     " node %s" % node)
        if req_size > info['vg_free']:
          raise errors.OpPrereqError("Not enough disk space on target node %s."
                                     " %d MB available, %d MB required" %
                                     (node, info['vg_free'], req_size))

    # validate the hvparams on all involved nodes
    _CheckHVParams(self, nodenames, self.op.hypervisor, self.op.hvparams)

    # os verification
    os_obj = self.rpc.call_os_get(pnode.name, self.op.os_type)
    if not os_obj:
      raise errors.OpPrereqError("OS '%s' not in supported os list for"
                                 " primary node"  % self.op.os_type)

    # bridge check on primary node
    if not self.rpc.call_bridges_exist(self.pnode.name, [self.op.bridge]):
      raise errors.OpPrereqError("target bridge '%s' does not exist on"
                                 " destination node '%s'" %
                                 (self.op.bridge, pnode.name))

    # memory check on primary node (only if the instance will be started)
    if self.op.start:
      _CheckNodeFreeMemory(self, self.pnode.name,
                           "creating instance %s" % self.op.instance_name,
                           self.be_full[constants.BE_MEMORY],
                           self.op.hypervisor)

    if self.op.start:
      self.instance_status = 'up'
    else:
      self.instance_status = 'down'

  def Exec(self, feedback_fn):
    """Create and add the instance to the cluster.

    Creates the disks, registers the instance in the configuration,
    waits for disk sync, runs the OS create/import scripts and
    optionally starts the instance; on disk failures the partially
    created state is rolled back.

    """
    instance = self.op.instance_name
    pnode_name = self.pnode.name

    # "auto" was accepted in ExpandNames and is resolved here
    if self.op.mac == "auto":
      mac_address = self.cfg.GenerateMAC()
    else:
      mac_address = self.op.mac

    nic = objects.NIC(bridge=self.op.bridge, mac=mac_address)
    if self.inst_ip is not None:
      nic.ip = self.inst_ip

    # some hypervisors need a network (e.g. VNC console) port allocated
    ht_kind = self.op.hypervisor
    if ht_kind in constants.HTS_REQ_PORT:
      network_port = self.cfg.AllocatePort()
    else:
      network_port = None

    ##if self.op.vnc_bind_address is None:
    ##  self.op.vnc_bind_address = constants.VNC_DEFAULT_BIND_ADDRESS

    # this is needed because os.path.join does not accept None arguments
    if self.op.file_storage_dir is None:
      string_file_storage_dir = ""
    else:
      string_file_storage_dir = self.op.file_storage_dir

    # build the full file storage dir path
    file_storage_dir = os.path.normpath(os.path.join(
                                        self.cfg.GetFileStorageDir(),
                                        string_file_storage_dir, instance))


    disks = _GenerateDiskTemplate(self,
                                  self.op.disk_template,
                                  instance, pnode_name,
                                  self.secondaries, self.op.disk_size,
                                  self.op.swap_size,
                                  file_storage_dir,
                                  self.op.file_driver)

    iobj = objects.Instance(name=instance, os=self.op.os_type,
                            primary_node=pnode_name,
                            nics=[nic], disks=disks,
                            disk_template=self.op.disk_template,
                            status=self.instance_status,
                            network_port=network_port,
                            beparams=self.op.beparams,
                            hvparams=self.op.hvparams,
                            hypervisor=self.op.hypervisor,
                            )

    feedback_fn("* creating instance disks...")
    if not _CreateDisks(self, iobj):
      # roll back whatever devices were created before failing
      _RemoveDisks(self, iobj)
      self.cfg.ReleaseDRBDMinors(instance)
      raise errors.OpExecError("Device creation failed, reverting...")

    feedback_fn("adding instance %s to cluster config" % instance)

    self.cfg.AddInstance(iobj)
    # Declare that we don't want to remove the instance lock anymore, as we've
    # added the instance to the config
    del self.remove_locks[locking.LEVEL_INSTANCE]
    # Remove the temp. assignements for the instance's drbds
    self.cfg.ReleaseDRBDMinors(instance)

    if self.op.wait_for_sync:
      disk_abort = not _WaitForSync(self, iobj)
    elif iobj.disk_template in constants.DTS_NET_MIRROR:
      # make sure the disks are not degraded (still sync-ing is ok)
      time.sleep(15)
      feedback_fn("* checking mirrors status")
      disk_abort = not _WaitForSync(self, iobj, oneshot=True)
    else:
      disk_abort = False

    if disk_abort:
      # disks never became healthy: undo disks and config entry
      _RemoveDisks(self, iobj)
      self.cfg.RemoveInstance(iobj.name)
      # Make sure the instance lock gets removed
      self.remove_locks[locking.LEVEL_INSTANCE] = iobj.name
      raise errors.OpExecError("There are some degraded disks for"
                               " this instance")

    feedback_fn("creating os for instance %s on node %s" %
                (instance, pnode_name))

    if iobj.disk_template != constants.DT_DISKLESS:
      if self.op.mode == constants.INSTANCE_CREATE:
        feedback_fn("* running the instance OS create scripts...")
        if not self.rpc.call_instance_os_add(pnode_name, iobj):
          raise errors.OpExecError("could not add os for instance %s"
                                   " on node %s" %
                                   (instance, pnode_name))

      elif self.op.mode == constants.INSTANCE_IMPORT:
        feedback_fn("* running the instance OS import scripts...")
        src_node = self.op.src_node
        src_image = self.src_image
        cluster_name = self.cfg.GetClusterName()
        if not self.rpc.call_instance_os_import(pnode_name, iobj, "sda", "sdb",
                                                src_node, src_image,
                                                cluster_name):
          raise errors.OpExecError("Could not import os for instance"
                                   " %s on node %s" %
                                   (instance, pnode_name))
      else:
        # also checked in the prereq part
        raise errors.ProgrammerError("Unknown OS initialization mode '%s'"
                                     % self.op.mode)

    if self.op.start:
      logger.Info("starting instance %s on node %s" % (instance, pnode_name))
      feedback_fn("* starting instance...")
      if not self.rpc.call_instance_start(pnode_name, iobj, None):
        raise errors.OpExecError("Could not start instance")
3710 a8083063 Iustin Pop
3711 a8083063 Iustin Pop
3712 a8083063 Iustin Pop
class LUConnectConsole(NoHooksLU):
3713 a8083063 Iustin Pop
  """Connect to an instance's console.
3714 a8083063 Iustin Pop

3715 a8083063 Iustin Pop
  This is somewhat special in that it returns the command line that
3716 a8083063 Iustin Pop
  you need to run on the master node in order to connect to the
3717 a8083063 Iustin Pop
  console.
3718 a8083063 Iustin Pop

3719 a8083063 Iustin Pop
  """
3720 a8083063 Iustin Pop
  _OP_REQP = ["instance_name"]
3721 8659b73e Guido Trotter
  REQ_BGL = False
3722 8659b73e Guido Trotter
3723 8659b73e Guido Trotter
  def ExpandNames(self):
3724 8659b73e Guido Trotter
    self._ExpandAndLockInstance()
3725 a8083063 Iustin Pop
3726 a8083063 Iustin Pop
  def CheckPrereq(self):
3727 a8083063 Iustin Pop
    """Check prerequisites.
3728 a8083063 Iustin Pop

3729 a8083063 Iustin Pop
    This checks that the instance is in the cluster.
3730 a8083063 Iustin Pop

3731 a8083063 Iustin Pop
    """
3732 8659b73e Guido Trotter
    self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
3733 8659b73e Guido Trotter
    assert self.instance is not None, \
3734 8659b73e Guido Trotter
      "Cannot retrieve locked instance %s" % self.op.instance_name
3735 a8083063 Iustin Pop
3736 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
3737 a8083063 Iustin Pop
    """Connect to the console of an instance
3738 a8083063 Iustin Pop

3739 a8083063 Iustin Pop
    """
3740 a8083063 Iustin Pop
    instance = self.instance
3741 a8083063 Iustin Pop
    node = instance.primary_node
3742 a8083063 Iustin Pop
3743 72737a7f Iustin Pop
    node_insts = self.rpc.call_instance_list([node],
3744 72737a7f Iustin Pop
                                             [instance.hypervisor])[node]
3745 a8083063 Iustin Pop
    if node_insts is False:
3746 3ecf6786 Iustin Pop
      raise errors.OpExecError("Can't connect to node %s." % node)
3747 a8083063 Iustin Pop
3748 a8083063 Iustin Pop
    if instance.name not in node_insts:
3749 3ecf6786 Iustin Pop
      raise errors.OpExecError("Instance %s is not running." % instance.name)
3750 a8083063 Iustin Pop
3751 a8083063 Iustin Pop
    logger.Debug("connecting to console of %s on %s" % (instance.name, node))
3752 a8083063 Iustin Pop
3753 e69d05fd Iustin Pop
    hyper = hypervisor.GetHypervisor(instance.hypervisor)
3754 30989e69 Alexander Schreiber
    console_cmd = hyper.GetShellCommandForConsole(instance)
3755 b047857b Michael Hanselmann
3756 82122173 Iustin Pop
    # build ssh cmdline
3757 0a80a26f Michael Hanselmann
    return self.ssh.BuildCmd(node, "root", console_cmd, batch=True, tty=True)
3758 a8083063 Iustin Pop
3759 a8083063 Iustin Pop
3760 a8083063 Iustin Pop
class LUReplaceDisks(LogicalUnit):
3761 a8083063 Iustin Pop
  """Replace the disks of an instance.
3762 a8083063 Iustin Pop

3763 a8083063 Iustin Pop
  """
3764 a8083063 Iustin Pop
  HPATH = "mirrors-replace"
3765 a8083063 Iustin Pop
  HTYPE = constants.HTYPE_INSTANCE
3766 a9e0c397 Iustin Pop
  _OP_REQP = ["instance_name", "mode", "disks"]
3767 efd990e4 Guido Trotter
  REQ_BGL = False
3768 efd990e4 Guido Trotter
3769 efd990e4 Guido Trotter
  def ExpandNames(self):
3770 efd990e4 Guido Trotter
    self._ExpandAndLockInstance()
3771 efd990e4 Guido Trotter
3772 efd990e4 Guido Trotter
    if not hasattr(self.op, "remote_node"):
3773 efd990e4 Guido Trotter
      self.op.remote_node = None
3774 efd990e4 Guido Trotter
3775 efd990e4 Guido Trotter
    ia_name = getattr(self.op, "iallocator", None)
3776 efd990e4 Guido Trotter
    if ia_name is not None:
3777 efd990e4 Guido Trotter
      if self.op.remote_node is not None:
3778 efd990e4 Guido Trotter
        raise errors.OpPrereqError("Give either the iallocator or the new"
3779 efd990e4 Guido Trotter
                                   " secondary, not both")
3780 efd990e4 Guido Trotter
      self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
3781 efd990e4 Guido Trotter
    elif self.op.remote_node is not None:
3782 efd990e4 Guido Trotter
      remote_node = self.cfg.ExpandNodeName(self.op.remote_node)
3783 efd990e4 Guido Trotter
      if remote_node is None:
3784 efd990e4 Guido Trotter
        raise errors.OpPrereqError("Node '%s' not known" %
3785 efd990e4 Guido Trotter
                                   self.op.remote_node)
3786 efd990e4 Guido Trotter
      self.op.remote_node = remote_node
3787 efd990e4 Guido Trotter
      self.needed_locks[locking.LEVEL_NODE] = [remote_node]
3788 efd990e4 Guido Trotter
      self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_APPEND
3789 efd990e4 Guido Trotter
    else:
3790 efd990e4 Guido Trotter
      self.needed_locks[locking.LEVEL_NODE] = []
3791 efd990e4 Guido Trotter
      self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
3792 efd990e4 Guido Trotter
3793 efd990e4 Guido Trotter
  def DeclareLocks(self, level):
3794 efd990e4 Guido Trotter
    # If we're not already locking all nodes in the set we have to declare the
3795 efd990e4 Guido Trotter
    # instance's primary/secondary nodes.
3796 efd990e4 Guido Trotter
    if (level == locking.LEVEL_NODE and
3797 efd990e4 Guido Trotter
        self.needed_locks[locking.LEVEL_NODE] is not locking.ALL_SET):
3798 efd990e4 Guido Trotter
      self._LockInstancesNodes()
3799 a8083063 Iustin Pop
3800 b6e82a65 Iustin Pop
  def _RunAllocator(self):
3801 b6e82a65 Iustin Pop
    """Compute a new secondary node using an IAllocator.
3802 b6e82a65 Iustin Pop

3803 b6e82a65 Iustin Pop
    """
3804 72737a7f Iustin Pop
    ial = IAllocator(self,
3805 b6e82a65 Iustin Pop
                     mode=constants.IALLOCATOR_MODE_RELOC,
3806 b6e82a65 Iustin Pop
                     name=self.op.instance_name,
3807 b6e82a65 Iustin Pop
                     relocate_from=[self.sec_node])
3808 b6e82a65 Iustin Pop
3809 b6e82a65 Iustin Pop
    ial.Run(self.op.iallocator)
3810 b6e82a65 Iustin Pop
3811 b6e82a65 Iustin Pop
    if not ial.success:
3812 b6e82a65 Iustin Pop
      raise errors.OpPrereqError("Can't compute nodes using"
3813 b6e82a65 Iustin Pop
                                 " iallocator '%s': %s" % (self.op.iallocator,
3814 b6e82a65 Iustin Pop
                                                           ial.info))
3815 b6e82a65 Iustin Pop
    if len(ial.nodes) != ial.required_nodes:
3816 b6e82a65 Iustin Pop
      raise errors.OpPrereqError("iallocator '%s' returned invalid number"
3817 b6e82a65 Iustin Pop
                                 " of nodes (%s), required %s" %
3818 b6e82a65 Iustin Pop
                                 (len(ial.nodes), ial.required_nodes))
3819 b6e82a65 Iustin Pop
    self.op.remote_node = ial.nodes[0]
3820 b6e82a65 Iustin Pop
    logger.ToStdout("Selected new secondary for the instance: %s" %
3821 b6e82a65 Iustin Pop
                    self.op.remote_node)
3822 b6e82a65 Iustin Pop
3823 a8083063 Iustin Pop
  def BuildHooksEnv(self):
3824 a8083063 Iustin Pop
    """Build hooks env.
3825 a8083063 Iustin Pop

3826 a8083063 Iustin Pop
    This runs on the master, the primary and all the secondaries.
3827 a8083063 Iustin Pop

3828 a8083063 Iustin Pop
    """
3829 a8083063 Iustin Pop
    env = {
3830 a9e0c397 Iustin Pop
      "MODE": self.op.mode,
3831 a8083063 Iustin Pop
      "NEW_SECONDARY": self.op.remote_node,
3832 a8083063 Iustin Pop
      "OLD_SECONDARY": self.instance.secondary_nodes[0],
3833 a8083063 Iustin Pop
      }
3834 338e51e8 Iustin Pop
    env.update(_BuildInstanceHookEnvByObject(self, self.instance))
3835 0834c866 Iustin Pop
    nl = [
3836 d6a02168 Michael Hanselmann
      self.cfg.GetMasterNode(),
3837 0834c866 Iustin Pop
      self.instance.primary_node,
3838 0834c866 Iustin Pop
      ]
3839 0834c866 Iustin Pop
    if self.op.remote_node is not None:
3840 0834c866 Iustin Pop
      nl.append(self.op.remote_node)
3841 a8083063 Iustin Pop
    return env, nl, nl
3842 a8083063 Iustin Pop
3843 a8083063 Iustin Pop
  def CheckPrereq(self):
3844 a8083063 Iustin Pop
    """Check prerequisites.
3845 a8083063 Iustin Pop

3846 a8083063 Iustin Pop
    This checks that the instance is in the cluster.
3847 a8083063 Iustin Pop

3848 a8083063 Iustin Pop
    """
3849 efd990e4 Guido Trotter
    instance = self.cfg.GetInstanceInfo(self.op.instance_name)
3850 efd990e4 Guido Trotter
    assert instance is not None, \
3851 efd990e4 Guido Trotter
      "Cannot retrieve locked instance %s" % self.op.instance_name
3852 a8083063 Iustin Pop
    self.instance = instance
3853 a8083063 Iustin Pop
3854 a9e0c397 Iustin Pop
    if instance.disk_template not in constants.DTS_NET_MIRROR:
3855 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("Instance's disk layout is not"
3856 a9e0c397 Iustin Pop
                                 " network mirrored.")
3857 a8083063 Iustin Pop
3858 a8083063 Iustin Pop
    if len(instance.secondary_nodes) != 1:
3859 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("The instance has a strange layout,"
3860 3ecf6786 Iustin Pop
                                 " expected one secondary but found %d" %
3861 3ecf6786 Iustin Pop
                                 len(instance.secondary_nodes))
3862 a8083063 Iustin Pop
3863 a9e0c397 Iustin Pop
    self.sec_node = instance.secondary_nodes[0]
3864 a9e0c397 Iustin Pop
3865 b6e82a65 Iustin Pop
    ia_name = getattr(self.op, "iallocator", None)
3866 b6e82a65 Iustin Pop
    if ia_name is not None:
3867 de8c7666 Guido Trotter
      self._RunAllocator()
3868 b6e82a65 Iustin Pop
3869 b6e82a65 Iustin Pop
    remote_node = self.op.remote_node
3870 a9e0c397 Iustin Pop
    if remote_node is not None:
3871 a9e0c397 Iustin Pop
      self.remote_node_info = self.cfg.GetNodeInfo(remote_node)
3872 efd990e4 Guido Trotter
      assert self.remote_node_info is not None, \
3873 efd990e4 Guido Trotter
        "Cannot retrieve locked node %s" % remote_node
3874 a9e0c397 Iustin Pop
    else:
3875 a9e0c397 Iustin Pop
      self.remote_node_info = None
3876 a8083063 Iustin Pop
    if remote_node == instance.primary_node:
3877 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("The specified node is the primary node of"
3878 3ecf6786 Iustin Pop
                                 " the instance.")
3879 a9e0c397 Iustin Pop
    elif remote_node == self.sec_node:
3880 0834c866 Iustin Pop
      if self.op.mode == constants.REPLACE_DISK_SEC:
3881 0834c866 Iustin Pop
        # this is for DRBD8, where we can't execute the same mode of
3882 0834c866 Iustin Pop
        # replacement as for drbd7 (no different port allocated)
3883 0834c866 Iustin Pop
        raise errors.OpPrereqError("Same secondary given, cannot execute"
3884 0834c866 Iustin Pop
                                   " replacement")
3885 a9e0c397 Iustin Pop
    if instance.disk_template == constants.DT_DRBD8:
3886 7df43a76 Iustin Pop
      if (self.op.mode == constants.REPLACE_DISK_ALL and
3887 7df43a76 Iustin Pop
          remote_node is not None):
3888 7df43a76 Iustin Pop
        # switch to replace secondary mode
3889 7df43a76 Iustin Pop
        self.op.mode = constants.REPLACE_DISK_SEC
3890 7df43a76 Iustin Pop
3891 a9e0c397 Iustin Pop
      if self.op.mode == constants.REPLACE_DISK_ALL:
3892 12c3449a Michael Hanselmann
        raise errors.OpPrereqError("Template 'drbd' only allows primary or"
3893 a9e0c397 Iustin Pop
                                   " secondary disk replacement, not"
3894 a9e0c397 Iustin Pop
                                   " both at once")
3895 a9e0c397 Iustin Pop
      elif self.op.mode == constants.REPLACE_DISK_PRI:
3896 a9e0c397 Iustin Pop
        if remote_node is not None:
3897 12c3449a Michael Hanselmann
          raise errors.OpPrereqError("Template 'drbd' does not allow changing"
3898 a9e0c397 Iustin Pop
                                     " the secondary while doing a primary"
3899 a9e0c397 Iustin Pop
                                     " node disk replacement")
3900 a9e0c397 Iustin Pop
        self.tgt_node = instance.primary_node
3901 cff90b79 Iustin Pop
        self.oth_node = instance.secondary_nodes[0]
3902 a9e0c397 Iustin Pop
      elif self.op.mode == constants.REPLACE_DISK_SEC:
3903 a9e0c397 Iustin Pop
        self.new_node = remote_node # this can be None, in which case
3904 a9e0c397 Iustin Pop
                                    # we don't change the secondary
3905 a9e0c397 Iustin Pop
        self.tgt_node = instance.secondary_nodes[0]
3906 cff90b79 Iustin Pop
        self.oth_node = instance.primary_node
3907 a9e0c397 Iustin Pop
      else:
3908 a9e0c397 Iustin Pop
        raise errors.ProgrammerError("Unhandled disk replace mode")
3909 a9e0c397 Iustin Pop
3910 a9e0c397 Iustin Pop
    for name in self.op.disks:
3911 a9e0c397 Iustin Pop
      if instance.FindDisk(name) is None:
3912 a9e0c397 Iustin Pop
        raise errors.OpPrereqError("Disk '%s' not found for instance '%s'" %
3913 a9e0c397 Iustin Pop
                                   (name, instance.name))
3914 a8083063 Iustin Pop
3915 a9e0c397 Iustin Pop
  def _ExecD8DiskOnly(self, feedback_fn):
3916 a9e0c397 Iustin Pop
    """Replace a disk on the primary or secondary for dbrd8.
3917 a9e0c397 Iustin Pop

3918 a9e0c397 Iustin Pop
    The algorithm for replace is quite complicated:
3919 a9e0c397 Iustin Pop
      - for each disk to be replaced:
3920 a9e0c397 Iustin Pop
        - create new LVs on the target node with unique names
3921 a9e0c397 Iustin Pop
        - detach old LVs from the drbd device
3922 a9e0c397 Iustin Pop
        - rename old LVs to name_replaced.<time_t>
3923 a9e0c397 Iustin Pop
        - rename new LVs to old LVs
3924 a9e0c397 Iustin Pop
        - attach the new LVs (with the old names now) to the drbd device
3925 a9e0c397 Iustin Pop
      - wait for sync across all devices
3926 a9e0c397 Iustin Pop
      - for each modified disk:
3927 a9e0c397 Iustin Pop
        - remove old LVs (which have the name name_replaces.<time_t>)
3928 a9e0c397 Iustin Pop

3929 a9e0c397 Iustin Pop
    Failures are not very well handled.
3930 cff90b79 Iustin Pop

3931 a9e0c397 Iustin Pop
    """
3932 cff90b79 Iustin Pop
    steps_total = 6
3933 5bfac263 Iustin Pop
    warning, info = (self.proc.LogWarning, self.proc.LogInfo)
3934 a9e0c397 Iustin Pop
    instance = self.instance
3935 a9e0c397 Iustin Pop
    iv_names = {}
3936 a9e0c397 Iustin Pop
    vgname = self.cfg.GetVGName()
3937 a9e0c397 Iustin Pop
    # start of work
3938 a9e0c397 Iustin Pop
    cfg = self.cfg
3939 a9e0c397 Iustin Pop
    tgt_node = self.tgt_node
3940 cff90b79 Iustin Pop
    oth_node = self.oth_node
3941 cff90b79 Iustin Pop
3942 cff90b79 Iustin Pop
    # Step: check device activation
3943 5bfac263 Iustin Pop
    self.proc.LogStep(1, steps_total, "check device existence")
3944 cff90b79 Iustin Pop
    info("checking volume groups")
3945 cff90b79 Iustin Pop
    my_vg = cfg.GetVGName()
3946 72737a7f Iustin Pop
    results = self.rpc.call_vg_list([oth_node, tgt_node])
3947 cff90b79 Iustin Pop
    if not results:
3948 cff90b79 Iustin Pop
      raise errors.OpExecError("Can't list volume groups on the nodes")
3949 cff90b79 Iustin Pop
    for node in oth_node, tgt_node:
3950 cff90b79 Iustin Pop
      res = results.get(node, False)
3951 cff90b79 Iustin Pop
      if not res or my_vg not in res:
3952 cff90b79 Iustin Pop
        raise errors.OpExecError("Volume group '%s' not found on %s" %
3953 cff90b79 Iustin Pop
                                 (my_vg, node))
3954 cff90b79 Iustin Pop
    for dev in instance.disks:
3955 cff90b79 Iustin Pop
      if not dev.iv_name in self.op.disks:
3956 cff90b79 Iustin Pop
        continue
3957 cff90b79 Iustin Pop
      for node in tgt_node, oth_node:
3958 cff90b79 Iustin Pop
        info("checking %s on %s" % (dev.iv_name, node))
3959 cff90b79 Iustin Pop
        cfg.SetDiskID(dev, node)
3960 72737a7f Iustin Pop
        if not self.rpc.call_blockdev_find(node, dev):
3961 cff90b79 Iustin Pop
          raise errors.OpExecError("Can't find device %s on node %s" %
3962 cff90b79 Iustin Pop
                                   (dev.iv_name, node))
3963 cff90b79 Iustin Pop
3964 cff90b79 Iustin Pop
    # Step: check other node consistency
3965 5bfac263 Iustin Pop
    self.proc.LogStep(2, steps_total, "check peer consistency")
3966 cff90b79 Iustin Pop
    for dev in instance.disks:
3967 cff90b79 Iustin Pop
      if not dev.iv_name in self.op.disks:
3968 cff90b79 Iustin Pop
        continue
3969 cff90b79 Iustin Pop
      info("checking %s consistency on %s" % (dev.iv_name, oth_node))
3970 b9bddb6b Iustin Pop
      if not _CheckDiskConsistency(self, dev, oth_node,
3971 cff90b79 Iustin Pop
                                   oth_node==instance.primary_node):
3972 cff90b79 Iustin Pop
        raise errors.OpExecError("Peer node (%s) has degraded storage, unsafe"
3973 cff90b79 Iustin Pop
                                 " to replace disks on this node (%s)" %
3974 cff90b79 Iustin Pop
                                 (oth_node, tgt_node))
3975 cff90b79 Iustin Pop
3976 cff90b79 Iustin Pop
    # Step: create new storage
3977 5bfac263 Iustin Pop
    self.proc.LogStep(3, steps_total, "allocate new storage")
3978 a9e0c397 Iustin Pop
    for dev in instance.disks:
3979 a9e0c397 Iustin Pop
      if not dev.iv_name in self.op.disks:
3980 a9e0c397 Iustin Pop
        continue
3981 a9e0c397 Iustin Pop
      size = dev.size
3982 a9e0c397 Iustin Pop
      cfg.SetDiskID(dev, tgt_node)
3983 a9e0c397 Iustin Pop
      lv_names = [".%s_%s" % (dev.iv_name, suf) for suf in ["data", "meta"]]
3984 b9bddb6b Iustin Pop
      names = _GenerateUniqueNames(self, lv_names)
3985 a9e0c397 Iustin Pop
      lv_data = objects.Disk(dev_type=constants.LD_LV, size=size,
3986 a9e0c397 Iustin Pop
                             logical_id=(vgname, names[0]))
3987 a9e0c397 Iustin Pop
      lv_meta = objects.Disk(dev_type=constants.LD_LV, size=128,
3988 a9e0c397 Iustin Pop
                             logical_id=(vgname, names[1]))
3989 a9e0c397 Iustin Pop
      new_lvs = [lv_data, lv_meta]
3990 a9e0c397 Iustin Pop
      old_lvs = dev.children
3991 a9e0c397 Iustin Pop
      iv_names[dev.iv_name] = (dev, old_lvs, new_lvs)
3992 cff90b79 Iustin Pop
      info("creating new local storage on %s for %s" %
3993 cff90b79 Iustin Pop
           (tgt_node, dev.iv_name))
3994 a9e0c397 Iustin Pop
      # since we *always* want to create this LV, we use the
3995 a9e0c397 Iustin Pop
      # _Create...OnPrimary (which forces the creation), even if we
3996 a9e0c397 Iustin Pop
      # are talking about the secondary node
3997 a9e0c397 Iustin Pop
      for new_lv in new_lvs:
3998 b9bddb6b Iustin Pop
        if not _CreateBlockDevOnPrimary(self, tgt_node, instance, new_lv,
3999 a9e0c397 Iustin Pop
                                        _GetInstanceInfoText(instance)):
4000 a9e0c397 Iustin Pop
          raise errors.OpExecError("Failed to create new LV named '%s' on"
4001 a9e0c397 Iustin Pop
                                   " node '%s'" %
4002 a9e0c397 Iustin Pop
                                   (new_lv.logical_id[1], tgt_node))
4003 a9e0c397 Iustin Pop
4004 cff90b79 Iustin Pop
    # Step: for each lv, detach+rename*2+attach
4005 5bfac263 Iustin Pop
    self.proc.LogStep(4, steps_total, "change drbd configuration")
4006 cff90b79 Iustin Pop
    for dev, old_lvs, new_lvs in iv_names.itervalues():
4007 cff90b79 Iustin Pop
      info("detaching %s drbd from local storage" % dev.iv_name)
4008 72737a7f Iustin Pop
      if not self.rpc.call_blockdev_removechildren(tgt_node, dev, old_lvs):
4009 a9e0c397 Iustin Pop
        raise errors.OpExecError("Can't detach drbd from local storage on node"
4010 a9e0c397 Iustin Pop
                                 " %s for device %s" % (tgt_node, dev.iv_name))
4011 cff90b79 Iustin Pop
      #dev.children = []
4012 cff90b79 Iustin Pop
      #cfg.Update(instance)
4013 a9e0c397 Iustin Pop
4014 a9e0c397 Iustin Pop
      # ok, we created the new LVs, so now we know we have the needed
4015 a9e0c397 Iustin Pop
      # storage; as such, we proceed on the target node to rename
4016 a9e0c397 Iustin Pop
      # old_lv to _old, and new_lv to old_lv; note that we rename LVs
4017 c99a3cc0 Manuel Franceschini
      # using the assumption that logical_id == physical_id (which in
4018 a9e0c397 Iustin Pop
      # turn is the unique_id on that node)
4019 cff90b79 Iustin Pop
4020 cff90b79 Iustin Pop
      # FIXME(iustin): use a better name for the replaced LVs
4021 a9e0c397 Iustin Pop
      temp_suffix = int(time.time())
4022 a9e0c397 Iustin Pop
      ren_fn = lambda d, suff: (d.physical_id[0],
4023 a9e0c397 Iustin Pop
                                d.physical_id[1] + "_replaced-%s" % suff)
4024 cff90b79 Iustin Pop
      # build the rename list based on what LVs exist on the node
4025 cff90b79 Iustin Pop
      rlist = []
4026 cff90b79 Iustin Pop
      for to_ren in old_lvs:
4027 72737a7f Iustin Pop
        find_res = self.rpc.call_blockdev_find(tgt_node, to_ren)
4028 cff90b79 Iustin Pop
        if find_res is not None: # device exists
4029 cff90b79 Iustin Pop
          rlist.append((to_ren, ren_fn(to_ren, temp_suffix)))
4030 cff90b79 Iustin Pop
4031 cff90b79 Iustin Pop
      info("renaming the old LVs on the target node")
4032 72737a7f Iustin Pop
      if not self.rpc.call_blockdev_rename(tgt_node, rlist):
4033 cff90b79 Iustin Pop
        raise errors.OpExecError("Can't rename old LVs on node %s" % tgt_node)
4034 a9e0c397 Iustin Pop
      # now we rename the new LVs to the old LVs
4035 cff90b79 Iustin Pop
      info("renaming the new LVs on the target node")
4036 a9e0c397 Iustin Pop
      rlist = [(new, old.physical_id) for old, new in zip(old_lvs, new_lvs)]
4037 72737a7f Iustin Pop
      if not self.rpc.call_blockdev_rename(tgt_node, rlist):
4038 cff90b79 Iustin Pop
        raise errors.OpExecError("Can't rename new LVs on node %s" % tgt_node)
4039 cff90b79 Iustin Pop
4040 cff90b79 Iustin Pop
      for old, new in zip(old_lvs, new_lvs):
4041 cff90b79 Iustin Pop
        new.logical_id = old.logical_id
4042 cff90b79 Iustin Pop
        cfg.SetDiskID(new, tgt_node)
4043 a9e0c397 Iustin Pop
4044 cff90b79 Iustin Pop
      for disk in old_lvs:
4045 cff90b79 Iustin Pop
        disk.logical_id = ren_fn(disk, temp_suffix)
4046 cff90b79 Iustin Pop
        cfg.SetDiskID(disk, tgt_node)
4047 a9e0c397 Iustin Pop
4048 a9e0c397 Iustin Pop
      # now that the new lvs have the old name, we can add them to the device
4049 cff90b79 Iustin Pop
      info("adding new mirror component on %s" % tgt_node)
4050 72737a7f Iustin Pop
      if not self.rpc.call_blockdev_addchildren(tgt_node, dev, new_lvs):
4051 a9e0c397 Iustin Pop
        for new_lv in new_lvs:
4052 72737a7f Iustin Pop
          if not self.rpc.call_blockdev_remove(tgt_node, new_lv):
4053 79caa9ed Guido Trotter
            warning("Can't rollback device %s", hint="manually cleanup unused"
4054 cff90b79 Iustin Pop
                    " logical volumes")
4055 cff90b79 Iustin Pop
        raise errors.OpExecError("Can't add local storage to drbd")
4056 a9e0c397 Iustin Pop
4057 a9e0c397 Iustin Pop
      dev.children = new_lvs
4058 a9e0c397 Iustin Pop
      cfg.Update(instance)
4059 a9e0c397 Iustin Pop
4060 cff90b79 Iustin Pop
    # Step: wait for sync
4061 a9e0c397 Iustin Pop
4062 a9e0c397 Iustin Pop
    # this can fail as the old devices are degraded and _WaitForSync
4063 a9e0c397 Iustin Pop
    # does a combined result over all disks, so we don't check its
4064 a9e0c397 Iustin Pop
    # return value
4065 5bfac263 Iustin Pop
    self.proc.LogStep(5, steps_total, "sync devices")
4066 b9bddb6b Iustin Pop
    _WaitForSync(self, instance, unlock=True)
4067 a9e0c397 Iustin Pop
4068 a9e0c397 Iustin Pop
    # so check manually all the devices
4069 a9e0c397 Iustin Pop
    for name, (dev, old_lvs, new_lvs) in iv_names.iteritems():
4070 a9e0c397 Iustin Pop
      cfg.SetDiskID(dev, instance.primary_node)
4071 72737a7f Iustin Pop
      is_degr = self.rpc.call_blockdev_find(instance.primary_node, dev)[5]
4072 a9e0c397 Iustin Pop
      if is_degr:
4073 a9e0c397 Iustin Pop
        raise errors.OpExecError("DRBD device %s is degraded!" % name)
4074 a9e0c397 Iustin Pop
4075 cff90b79 Iustin Pop
    # Step: remove old storage
4076 5bfac263 Iustin Pop
    self.proc.LogStep(6, steps_total, "removing old storage")
4077 a9e0c397 Iustin Pop
    for name, (dev, old_lvs, new_lvs) in iv_names.iteritems():
4078 cff90b79 Iustin Pop
      info("remove logical volumes for %s" % name)
4079 a9e0c397 Iustin Pop
      for lv in old_lvs:
4080 a9e0c397 Iustin Pop
        cfg.SetDiskID(lv, tgt_node)
4081 72737a7f Iustin Pop
        if not self.rpc.call_blockdev_remove(tgt_node, lv):
4082 79caa9ed Guido Trotter
          warning("Can't remove old LV", hint="manually remove unused LVs")
4083 a9e0c397 Iustin Pop
          continue
4084 a9e0c397 Iustin Pop
4085 a9e0c397 Iustin Pop
  def _ExecD8Secondary(self, feedback_fn):
4086 a9e0c397 Iustin Pop
    """Replace the secondary node for drbd8.
4087 a9e0c397 Iustin Pop

4088 a9e0c397 Iustin Pop
    The algorithm for replace is quite complicated:
4089 a9e0c397 Iustin Pop
      - for all disks of the instance:
4090 a9e0c397 Iustin Pop
        - create new LVs on the new node with same names
4091 a9e0c397 Iustin Pop
        - shutdown the drbd device on the old secondary
4092 a9e0c397 Iustin Pop
        - disconnect the drbd network on the primary
4093 a9e0c397 Iustin Pop
        - create the drbd device on the new secondary
4094 a9e0c397 Iustin Pop
        - network attach the drbd on the primary, using an artifice:
4095 a9e0c397 Iustin Pop
          the drbd code for Attach() will connect to the network if it
4096 a9e0c397 Iustin Pop
          finds a device which is connected to the good local disks but
4097 a9e0c397 Iustin Pop
          not network enabled
4098 a9e0c397 Iustin Pop
      - wait for sync across all devices
4099 a9e0c397 Iustin Pop
      - remove all disks from the old secondary
4100 a9e0c397 Iustin Pop

4101 a9e0c397 Iustin Pop
    Failures are not very well handled.
4102 0834c866 Iustin Pop

4103 a9e0c397 Iustin Pop
    """
4104 0834c866 Iustin Pop
    steps_total = 6
4105 5bfac263 Iustin Pop
    warning, info = (self.proc.LogWarning, self.proc.LogInfo)
4106 a9e0c397 Iustin Pop
    instance = self.instance
4107 a9e0c397 Iustin Pop
    iv_names = {}
4108 a9e0c397 Iustin Pop
    vgname = self.cfg.GetVGName()
4109 a9e0c397 Iustin Pop
    # start of work
4110 a9e0c397 Iustin Pop
    cfg = self.cfg
4111 a9e0c397 Iustin Pop
    old_node = self.tgt_node
4112 a9e0c397 Iustin Pop
    new_node = self.new_node
4113 a9e0c397 Iustin Pop
    pri_node = instance.primary_node
4114 0834c866 Iustin Pop
4115 0834c866 Iustin Pop
    # Step: check device activation
4116 5bfac263 Iustin Pop
    self.proc.LogStep(1, steps_total, "check device existence")
4117 0834c866 Iustin Pop
    info("checking volume groups")
4118 0834c866 Iustin Pop
    my_vg = cfg.GetVGName()
4119 72737a7f Iustin Pop
    results = self.rpc.call_vg_list([pri_node, new_node])
4120 0834c866 Iustin Pop
    if not results:
4121 0834c866 Iustin Pop
      raise errors.OpExecError("Can't list volume groups on the nodes")
4122 0834c866 Iustin Pop
    for node in pri_node, new_node:
4123 0834c866 Iustin Pop
      res = results.get(node, False)
4124 0834c866 Iustin Pop
      if not res or my_vg not in res:
4125 0834c866 Iustin Pop
        raise errors.OpExecError("Volume group '%s' not found on %s" %
4126 0834c866 Iustin Pop
                                 (my_vg, node))
4127 0834c866 Iustin Pop
    for dev in instance.disks:
4128 0834c866 Iustin Pop
      if not dev.iv_name in self.op.disks:
4129 0834c866 Iustin Pop
        continue
4130 0834c866 Iustin Pop
      info("checking %s on %s" % (dev.iv_name, pri_node))
4131 0834c866 Iustin Pop
      cfg.SetDiskID(dev, pri_node)
4132 72737a7f Iustin Pop
      if not self.rpc.call_blockdev_find(pri_node, dev):
4133 0834c866 Iustin Pop
        raise errors.OpExecError("Can't find device %s on node %s" %
4134 0834c866 Iustin Pop
                                 (dev.iv_name, pri_node))
4135 0834c866 Iustin Pop
4136 0834c866 Iustin Pop
    # Step: check other node consistency
4137 5bfac263 Iustin Pop
    self.proc.LogStep(2, steps_total, "check peer consistency")
4138 0834c866 Iustin Pop
    for dev in instance.disks:
4139 0834c866 Iustin Pop
      if not dev.iv_name in self.op.disks:
4140 0834c866 Iustin Pop
        continue
4141 0834c866 Iustin Pop
      info("checking %s consistency on %s" % (dev.iv_name, pri_node))
4142 b9bddb6b Iustin Pop
      if not _CheckDiskConsistency(self, dev, pri_node, True, ldisk=True):
4143 0834c866 Iustin Pop
        raise errors.OpExecError("Primary node (%s) has degraded storage,"
4144 0834c866 Iustin Pop
                                 " unsafe to replace the secondary" %
4145 0834c866 Iustin Pop
                                 pri_node)
4146 0834c866 Iustin Pop
4147 0834c866 Iustin Pop
    # Step: create new storage
4148 5bfac263 Iustin Pop
    self.proc.LogStep(3, steps_total, "allocate new storage")
4149 468b46f9 Iustin Pop
    for dev in instance.disks:
4150 a9e0c397 Iustin Pop
      size = dev.size
4151 0834c866 Iustin Pop
      info("adding new local storage on %s for %s" % (new_node, dev.iv_name))
4152 a9e0c397 Iustin Pop
      # since we *always* want to create this LV, we use the
4153 a9e0c397 Iustin Pop
      # _Create...OnPrimary (which forces the creation), even if we
4154 a9e0c397 Iustin Pop
      # are talking about the secondary node
4155 a9e0c397 Iustin Pop
      for new_lv in dev.children:
4156 b9bddb6b Iustin Pop
        if not _CreateBlockDevOnPrimary(self, new_node, instance, new_lv,
4157 a9e0c397 Iustin Pop
                                        _GetInstanceInfoText(instance)):
4158 a9e0c397 Iustin Pop
          raise errors.OpExecError("Failed to create new LV named '%s' on"
4159 a9e0c397 Iustin Pop
                                   " node '%s'" %
4160 a9e0c397 Iustin Pop
                                   (new_lv.logical_id[1], new_node))
4161 a9e0c397 Iustin Pop
4162 0834c866 Iustin Pop
4163 468b46f9 Iustin Pop
    # Step 4: dbrd minors and drbd setups changes
4164 a1578d63 Iustin Pop
    # after this, we must manually remove the drbd minors on both the
4165 a1578d63 Iustin Pop
    # error and the success paths
4166 a1578d63 Iustin Pop
    minors = cfg.AllocateDRBDMinor([new_node for dev in instance.disks],
4167 a1578d63 Iustin Pop
                                   instance.name)
4168 468b46f9 Iustin Pop
    logging.debug("Allocated minors %s" % (minors,))
4169 5bfac263 Iustin Pop
    self.proc.LogStep(4, steps_total, "changing drbd configuration")
4170 468b46f9 Iustin Pop
    for dev, new_minor in zip(instance.disks, minors):
4171 0834c866 Iustin Pop
      size = dev.size
4172 0834c866 Iustin Pop
      info("activating a new drbd on %s for %s" % (new_node, dev.iv_name))
4173 a9e0c397 Iustin Pop
      # create new devices on new_node
4174 ffa1c0dc Iustin Pop
      if pri_node == dev.logical_id[0]:
4175 ffa1c0dc Iustin Pop
        new_logical_id = (pri_node, new_node,
4176 f9518d38 Iustin Pop
                          dev.logical_id[2], dev.logical_id[3], new_minor,
4177 f9518d38 Iustin Pop
                          dev.logical_id[5])
4178 ffa1c0dc Iustin Pop
      else:
4179 ffa1c0dc Iustin Pop
        new_logical_id = (new_node, pri_node,
4180 f9518d38 Iustin Pop
                          dev.logical_id[2], new_minor, dev.logical_id[4],
4181 f9518d38 Iustin Pop
                          dev.logical_id[5])
4182 468b46f9 Iustin Pop
      iv_names[dev.iv_name] = (dev, dev.children, new_logical_id)
4183 a1578d63 Iustin Pop
      logging.debug("Allocated new_minor: %s, new_logical_id: %s", new_minor,
4184 a1578d63 Iustin Pop
                    new_logical_id)
4185 a9e0c397 Iustin Pop
      new_drbd = objects.Disk(dev_type=constants.LD_DRBD8,
4186 ffa1c0dc Iustin Pop
                              logical_id=new_logical_id,
4187 a9e0c397 Iustin Pop
                              children=dev.children)
4188 b9bddb6b Iustin Pop
      if not _CreateBlockDevOnSecondary(self, new_node, instance,
4189 3f78eef2 Iustin Pop
                                        new_drbd, False,
4190 b9bddb6b Iustin Pop
                                        _GetInstanceInfoText(instance)):
4191 a1578d63 Iustin Pop
        self.cfg.ReleaseDRBDMinors(instance.name)
4192 a9e0c397 Iustin Pop
        raise errors.OpExecError("Failed to create new DRBD on"
4193 a9e0c397 Iustin Pop
                                 " node '%s'" % new_node)
4194 a9e0c397 Iustin Pop
4195 0834c866 Iustin Pop
    for dev in instance.disks:
4196 a9e0c397 Iustin Pop
      # we have new devices, shutdown the drbd on the old secondary
4197 0834c866 Iustin Pop
      info("shutting down drbd for %s on old node" % dev.iv_name)
4198 a9e0c397 Iustin Pop
      cfg.SetDiskID(dev, old_node)
4199 72737a7f Iustin Pop
      if not self.rpc.call_blockdev_shutdown(old_node, dev):
4200 0834c866 Iustin Pop
        warning("Failed to shutdown drbd for %s on old node" % dev.iv_name,
4201 79caa9ed Guido Trotter
                hint="Please cleanup this device manually as soon as possible")
4202 a9e0c397 Iustin Pop
4203 642445d9 Iustin Pop
    info("detaching primary drbds from the network (=> standalone)")
4204 642445d9 Iustin Pop
    done = 0
4205 642445d9 Iustin Pop
    for dev in instance.disks:
4206 a9e0c397 Iustin Pop
      cfg.SetDiskID(dev, pri_node)
4207 f9518d38 Iustin Pop
      # set the network part of the physical (unique in bdev terms) id
4208 f9518d38 Iustin Pop
      # to None, meaning detach from network
4209 f9518d38 Iustin Pop
      dev.physical_id = (None, None, None, None) + dev.physical_id[4:]
4210 642445d9 Iustin Pop
      # and 'find' the device, which will 'fix' it to match the
4211 642445d9 Iustin Pop
      # standalone state
4212 72737a7f Iustin Pop
      if self.rpc.call_blockdev_find(pri_node, dev):
4213 642445d9 Iustin Pop
        done += 1
4214 642445d9 Iustin Pop
      else:
4215 642445d9 Iustin Pop
        warning("Failed to detach drbd %s from network, unusual case" %
4216 642445d9 Iustin Pop
                dev.iv_name)
4217 642445d9 Iustin Pop
4218 642445d9 Iustin Pop
    if not done:
4219 642445d9 Iustin Pop
      # no detaches succeeded (very unlikely)
4220 a1578d63 Iustin Pop
      self.cfg.ReleaseDRBDMinors(instance.name)
4221 642445d9 Iustin Pop
      raise errors.OpExecError("Can't detach at least one DRBD from old node")
4222 642445d9 Iustin Pop
4223 642445d9 Iustin Pop
    # if we managed to detach at least one, we update all the disks of
4224 642445d9 Iustin Pop
    # the instance to point to the new secondary
4225 642445d9 Iustin Pop
    info("updating instance configuration")
4226 468b46f9 Iustin Pop
    for dev, _, new_logical_id in iv_names.itervalues():
4227 468b46f9 Iustin Pop
      dev.logical_id = new_logical_id
4228 a9e0c397 Iustin Pop
      cfg.SetDiskID(dev, pri_node)
4229 642445d9 Iustin Pop
    cfg.Update(instance)
4230 a1578d63 Iustin Pop
    # we can remove now the temp minors as now the new values are
4231 a1578d63 Iustin Pop
    # written to the config file (and therefore stable)
4232 a1578d63 Iustin Pop
    self.cfg.ReleaseDRBDMinors(instance.name)
4233 a9e0c397 Iustin Pop
4234 642445d9 Iustin Pop
    # and now perform the drbd attach
4235 642445d9 Iustin Pop
    info("attaching primary drbds to new secondary (standalone => connected)")
4236 642445d9 Iustin Pop
    failures = []
4237 642445d9 Iustin Pop
    for dev in instance.disks:
4238 642445d9 Iustin Pop
      info("attaching primary drbd for %s to new secondary node" % dev.iv_name)
4239 642445d9 Iustin Pop
      # since the attach is smart, it's enough to 'find' the device,
4240 642445d9 Iustin Pop
      # it will automatically activate the network, if the physical_id
4241 642445d9 Iustin Pop
      # is correct
4242 642445d9 Iustin Pop
      cfg.SetDiskID(dev, pri_node)
4243 ffa1c0dc Iustin Pop
      logging.debug("Disk to attach: %s", dev)
4244 72737a7f Iustin Pop
      if not self.rpc.call_blockdev_find(pri_node, dev):
4245 642445d9 Iustin Pop
        warning("can't attach drbd %s to new secondary!" % dev.iv_name,
4246 642445d9 Iustin Pop
                "please do a gnt-instance info to see the status of disks")
4247 a9e0c397 Iustin Pop
4248 a9e0c397 Iustin Pop
    # this can fail as the old devices are degraded and _WaitForSync
4249 a9e0c397 Iustin Pop
    # does a combined result over all disks, so we don't check its
4250 a9e0c397 Iustin Pop
    # return value
4251 5bfac263 Iustin Pop
    self.proc.LogStep(5, steps_total, "sync devices")
4252 b9bddb6b Iustin Pop
    _WaitForSync(self, instance, unlock=True)
4253 a9e0c397 Iustin Pop
4254 a9e0c397 Iustin Pop
    # so check manually all the devices
4255 ffa1c0dc Iustin Pop
    for name, (dev, old_lvs, _) in iv_names.iteritems():
4256 a9e0c397 Iustin Pop
      cfg.SetDiskID(dev, pri_node)
4257 72737a7f Iustin Pop
      is_degr = self.rpc.call_blockdev_find(pri_node, dev)[5]
4258 a9e0c397 Iustin Pop
      if is_degr:
4259 a9e0c397 Iustin Pop
        raise errors.OpExecError("DRBD device %s is degraded!" % name)
4260 a9e0c397 Iustin Pop
4261 5bfac263 Iustin Pop
    self.proc.LogStep(6, steps_total, "removing old storage")
4262 ffa1c0dc Iustin Pop
    for name, (dev, old_lvs, _) in iv_names.iteritems():
4263 0834c866 Iustin Pop
      info("remove logical volumes for %s" % name)
4264 a9e0c397 Iustin Pop
      for lv in old_lvs:
4265 a9e0c397 Iustin Pop
        cfg.SetDiskID(lv, old_node)
4266 72737a7f Iustin Pop
        if not self.rpc.call_blockdev_remove(old_node, lv):
4267 0834c866 Iustin Pop
          warning("Can't remove LV on old secondary",
4268 79caa9ed Guido Trotter
                  hint="Cleanup stale volumes by hand")
4269 a9e0c397 Iustin Pop
4270 a9e0c397 Iustin Pop
  def Exec(self, feedback_fn):
    """Execute disk replacement.

    This dispatches the disk replacement to the appropriate handler.

    """
    instance = self.instance

    # Activate the instance disks if we're replacing them on a down instance
    if instance.status == "down":
      _StartInstanceDisks(self, instance, True)

    # pick the handler; only DRBD8 instances are supported here
    if instance.disk_template != constants.DT_DRBD8:
      raise errors.ProgrammerError("Unhandled disk replacement case")
    if self.op.remote_node is None:
      handler = self._ExecD8DiskOnly
    else:
      handler = self._ExecD8Secondary

    ret = handler(feedback_fn)

    # Deactivate the instance disks if we're replacing them on a down instance
    if instance.status == "down":
      _SafeShutdownInstanceDisks(self, instance)

    return ret
4297 a9e0c397 Iustin Pop
4298 a8083063 Iustin Pop
4299 8729e0d7 Iustin Pop
class LUGrowDisk(LogicalUnit):
  """Grow a disk of an instance.

  """
  HPATH = "disk-grow"
  HTYPE = constants.HTYPE_INSTANCE
  _OP_REQP = ["instance_name", "disk", "amount", "wait_for_sync"]
  REQ_BGL = False

  def ExpandNames(self):
    # lock the instance and (lazily, via DeclareLocks) its nodes
    self._ExpandAndLockInstance()
    self.needed_locks[locking.LEVEL_NODE] = []
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE

  def DeclareLocks(self, level):
    if level == locking.LEVEL_NODE:
      self._LockInstancesNodes()

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on the master, the primary and all the secondaries.

    """
    env = {
      "DISK": self.op.disk,
      "AMOUNT": self.op.amount,
      }
    env.update(_BuildInstanceHookEnvByObject(self, self.instance))
    nl = [
      self.cfg.GetMasterNode(),
      self.instance.primary_node,
      ]
    return env, nl, nl

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster, that its disk
    template supports growing, that the named disk exists and that all
    involved nodes have enough free space in the volume group.

    """
    instance = self.cfg.GetInstanceInfo(self.op.instance_name)
    assert instance is not None, \
      "Cannot retrieve locked instance %s" % self.op.instance_name

    self.instance = instance

    if instance.disk_template not in (constants.DT_PLAIN, constants.DT_DRBD8):
      raise errors.OpPrereqError("Instance's disk layout does not support"
                                 " growing.")

    if instance.FindDisk(self.op.disk) is None:
      raise errors.OpPrereqError("Disk '%s' not found for instance '%s'" %
                                 (self.op.disk, instance.name))

    nodenames = [instance.primary_node] + list(instance.secondary_nodes)
    nodeinfo = self.rpc.call_node_info(nodenames, self.cfg.GetVGName(),
                                       instance.hypervisor)
    for node in nodenames:
      info = nodeinfo.get(node, None)
      if not info:
        raise errors.OpPrereqError("Cannot get current information"
                                   " from node '%s'" % node)
      vg_free = info.get('vg_free', None)
      if not isinstance(vg_free, int):
        raise errors.OpPrereqError("Can't compute free disk space on"
                                   " node %s" % node)
      if self.op.amount > info['vg_free']:
        raise errors.OpPrereqError("Not enough disk space on target node %s:"
                                   " %d MiB available, %d MiB required" %
                                   (node, info['vg_free'], self.op.amount))

  def Exec(self, feedback_fn):
    """Execute disk grow.

    Grows the disk on all nodes (secondaries first, then the primary),
    records the new size in the configuration and optionally waits for
    the device to resync.

    """
    instance = self.instance
    disk = instance.FindDisk(self.op.disk)
    for node in (instance.secondary_nodes + (instance.primary_node,)):
      self.cfg.SetDiskID(disk, node)
      result = self.rpc.call_blockdev_grow(node, disk, self.op.amount)
      if (not result or not isinstance(result, (list, tuple)) or
          len(result) != 2):
        raise errors.OpExecError("grow request failed to node %s" % node)
      elif not result[0]:
        raise errors.OpExecError("grow request failed to node %s: %s" %
                                 (node, result[1]))
    disk.RecordGrow(self.op.amount)
    self.cfg.Update(instance)
    if self.op.wait_for_sync:
      # FIX: _WaitForSync takes the LU as its first argument (see the
      # other call sites, e.g. _WaitForSync(self, instance, unlock=True));
      # passing self.cfg/self.proc positionally was wrong
      disk_abort = not _WaitForSync(self, instance)
      if disk_abort:
        logger.Error("Warning: disk sync-ing has not returned a good status.\n"
                     " Please check the instance.")
4393 8729e0d7 Iustin Pop
4394 8729e0d7 Iustin Pop
4395 a8083063 Iustin Pop
class LUQueryInstanceData(NoHooksLU):
  """Query runtime instance data.

  """
  _OP_REQP = ["instances", "static"]
  REQ_BGL = False

  def ExpandNames(self):
    self.needed_locks = {}
    # all locks are acquired in shared mode, this is a read-only LU
    self.share_locks = dict(((i, 1) for i in locking.LEVELS))

    if not isinstance(self.op.instances, list):
      raise errors.OpPrereqError("Invalid argument type 'instances'")

    if self.op.instances:
      self.wanted_names = []
      for name in self.op.instances:
        full_name = self.cfg.ExpandInstanceName(name)
        if full_name is None:
          # FIX: report the name that failed to expand; this opcode has no
          # 'instance_name' attribute, so the old code raised AttributeError
          raise errors.OpPrereqError("Instance '%s' not known" % name)
        self.wanted_names.append(full_name)
      self.needed_locks[locking.LEVEL_INSTANCE] = self.wanted_names
    else:
      # no instances given: query all of them
      self.wanted_names = None
      self.needed_locks[locking.LEVEL_INSTANCE] = locking.ALL_SET

    self.needed_locks[locking.LEVEL_NODE] = []
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE

  def DeclareLocks(self, level):
    if level == locking.LEVEL_NODE:
      self._LockInstancesNodes()

  def CheckPrereq(self):
    """Check prerequisites.

    This only checks the optional instance list against the existing names.

    """
    if self.wanted_names is None:
      # "all instances" case: use whatever instance locks we acquired
      self.wanted_names = self.acquired_locks[locking.LEVEL_INSTANCE]

    self.wanted_instances = [self.cfg.GetInstanceInfo(name) for name
                             in self.wanted_names]
    return

  def _ComputeDiskStatus(self, instance, snode, dev):
    """Compute block device status.

    Recursively builds a dict describing the device and its children;
    live status is queried via RPC unless self.op.static is set.

    """
    static = self.op.static
    if not static:
      self.cfg.SetDiskID(dev, instance.primary_node)
      dev_pstatus = self.rpc.call_blockdev_find(instance.primary_node, dev)
    else:
      dev_pstatus = None

    if dev.dev_type in constants.LDS_DRBD:
      # we change the snode then (otherwise we use the one passed in)
      if dev.logical_id[0] == instance.primary_node:
        snode = dev.logical_id[1]
      else:
        snode = dev.logical_id[0]

    if snode and not static:
      self.cfg.SetDiskID(dev, snode)
      dev_sstatus = self.rpc.call_blockdev_find(snode, dev)
    else:
      dev_sstatus = None

    if dev.children:
      dev_children = [self._ComputeDiskStatus(instance, snode, child)
                      for child in dev.children]
    else:
      dev_children = []

    data = {
      "iv_name": dev.iv_name,
      "dev_type": dev.dev_type,
      "logical_id": dev.logical_id,
      "physical_id": dev.physical_id,
      "pstatus": dev_pstatus,
      "sstatus": dev_sstatus,
      "children": dev_children,
      }

    return data

  def Exec(self, feedback_fn):
    """Gather and return data"""
    result = {}

    cluster = self.cfg.GetClusterInfo()

    for instance in self.wanted_instances:
      if not self.op.static:
        # query the primary node for the live state of the instance
        remote_info = self.rpc.call_instance_info(instance.primary_node,
                                                  instance.name,
                                                  instance.hypervisor)
        if remote_info and "state" in remote_info:
          remote_state = "up"
        else:
          remote_state = "down"
      else:
        remote_state = None
      if instance.status == "down":
        config_state = "down"
      else:
        config_state = "up"

      disks = [self._ComputeDiskStatus(instance, None, device)
               for device in instance.disks]

      idict = {
        "name": instance.name,
        "config_state": config_state,
        "run_state": remote_state,
        "pnode": instance.primary_node,
        "snodes": instance.secondary_nodes,
        "os": instance.os,
        "nics": [(nic.mac, nic.ip, nic.bridge) for nic in instance.nics],
        "disks": disks,
        "hypervisor": instance.hypervisor,
        "network_port": instance.network_port,
        "hv_instance": instance.hvparams,
        "hv_actual": cluster.FillHV(instance),
        "be_instance": instance.beparams,
        "be_actual": cluster.FillBE(instance),
        }

      result[instance.name] = idict

    return result
4529 a8083063 Iustin Pop
4530 a8083063 Iustin Pop
4531 7767bbf5 Manuel Franceschini
class LUSetInstanceParams(LogicalUnit):
4532 a8083063 Iustin Pop
  """Modifies an instances's parameters.
4533 a8083063 Iustin Pop

4534 a8083063 Iustin Pop
  """
4535 a8083063 Iustin Pop
  HPATH = "instance-modify"
4536 a8083063 Iustin Pop
  HTYPE = constants.HTYPE_INSTANCE
4537 74409b12 Iustin Pop
  _OP_REQP = ["instance_name", "hvparams"]
4538 1a5c7281 Guido Trotter
  REQ_BGL = False
4539 1a5c7281 Guido Trotter
4540 1a5c7281 Guido Trotter
  def ExpandNames(self):
4541 1a5c7281 Guido Trotter
    self._ExpandAndLockInstance()
4542 74409b12 Iustin Pop
    self.needed_locks[locking.LEVEL_NODE] = []
4543 74409b12 Iustin Pop
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
4544 74409b12 Iustin Pop
4545 74409b12 Iustin Pop
4546 74409b12 Iustin Pop
  def DeclareLocks(self, level):
4547 74409b12 Iustin Pop
    if level == locking.LEVEL_NODE:
4548 74409b12 Iustin Pop
      self._LockInstancesNodes()
4549 a8083063 Iustin Pop
4550 a8083063 Iustin Pop
  def BuildHooksEnv(self):
4551 a8083063 Iustin Pop
    """Build hooks env.
4552 a8083063 Iustin Pop

4553 a8083063 Iustin Pop
    This runs on the master, primary and secondaries.
4554 a8083063 Iustin Pop

4555 a8083063 Iustin Pop
    """
4556 396e1b78 Michael Hanselmann
    args = dict()
4557 338e51e8 Iustin Pop
    if constants.BE_MEMORY in self.be_new:
4558 338e51e8 Iustin Pop
      args['memory'] = self.be_new[constants.BE_MEMORY]
4559 338e51e8 Iustin Pop
    if constants.BE_VCPUS in self.be_new:
4560 61be6ba4 Iustin Pop
      args['vcpus'] = self.be_new[constants.BE_VCPUS]
4561 ef756965 Iustin Pop
    if self.do_ip or self.do_bridge or self.mac:
4562 396e1b78 Michael Hanselmann
      if self.do_ip:
4563 396e1b78 Michael Hanselmann
        ip = self.ip
4564 396e1b78 Michael Hanselmann
      else:
4565 396e1b78 Michael Hanselmann
        ip = self.instance.nics[0].ip
4566 396e1b78 Michael Hanselmann
      if self.bridge:
4567 396e1b78 Michael Hanselmann
        bridge = self.bridge
4568 396e1b78 Michael Hanselmann
      else:
4569 396e1b78 Michael Hanselmann
        bridge = self.instance.nics[0].bridge
4570 ef756965 Iustin Pop
      if self.mac:
4571 ef756965 Iustin Pop
        mac = self.mac
4572 ef756965 Iustin Pop
      else:
4573 ef756965 Iustin Pop
        mac = self.instance.nics[0].mac
4574 ef756965 Iustin Pop
      args['nics'] = [(ip, bridge, mac)]
4575 338e51e8 Iustin Pop
    env = _BuildInstanceHookEnvByObject(self, self.instance, override=args)
4576 d6a02168 Michael Hanselmann
    nl = [self.cfg.GetMasterNode(),
4577 a8083063 Iustin Pop
          self.instance.primary_node] + list(self.instance.secondary_nodes)
4578 a8083063 Iustin Pop
    return env, nl, nl
4579 a8083063 Iustin Pop
4580 a8083063 Iustin Pop
  def CheckPrereq(self):
4581 a8083063 Iustin Pop
    """Check prerequisites.
4582 a8083063 Iustin Pop

4583 a8083063 Iustin Pop
    This only checks the instance list against the existing names.
4584 a8083063 Iustin Pop

4585 a8083063 Iustin Pop
    """
4586 1a5c7281 Guido Trotter
    # FIXME: all the parameters could be checked before, in ExpandNames, or in
4587 1a5c7281 Guido Trotter
    # a separate CheckArguments function, if we implement one, so the operation
4588 1a5c7281 Guido Trotter
    # can be aborted without waiting for any lock, should it have an error...
4589 a8083063 Iustin Pop
    self.ip = getattr(self.op, "ip", None)
4590 1862d460 Alexander Schreiber
    self.mac = getattr(self.op, "mac", None)
4591 a8083063 Iustin Pop
    self.bridge = getattr(self.op, "bridge", None)
4592 973d7867 Iustin Pop
    self.kernel_path = getattr(self.op, "kernel_path", None)
4593 973d7867 Iustin Pop
    self.initrd_path = getattr(self.op, "initrd_path", None)
4594 4300c4b6 Guido Trotter
    self.force = getattr(self.op, "force", None)
4595 338e51e8 Iustin Pop
    all_parms = [self.ip, self.bridge, self.mac]
4596 338e51e8 Iustin Pop
    if (all_parms.count(None) == len(all_parms) and
4597 338e51e8 Iustin Pop
        not self.op.hvparams and
4598 338e51e8 Iustin Pop
        not self.op.beparams):
4599 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("No changes submitted")
4600 338e51e8 Iustin Pop
    for item in (constants.BE_MEMORY, constants.BE_VCPUS):
4601 338e51e8 Iustin Pop
      val = self.op.beparams.get(item, None)
4602 338e51e8 Iustin Pop
      if val is not None:
4603 338e51e8 Iustin Pop
        try:
4604 338e51e8 Iustin Pop
          val = int(val)
4605 338e51e8 Iustin Pop
        except ValueError, err:
4606 338e51e8 Iustin Pop
          raise errors.OpPrereqError("Invalid %s size: %s" % (item, str(err)))
4607 338e51e8 Iustin Pop
        self.op.beparams[item] = val
4608 a8083063 Iustin Pop
    if self.ip is not None:
4609 a8083063 Iustin Pop
      self.do_ip = True
4610 a8083063 Iustin Pop
      if self.ip.lower() == "none":
4611 a8083063 Iustin Pop
        self.ip = None
4612 a8083063 Iustin Pop
      else:
4613 a8083063 Iustin Pop
        if not utils.IsValidIP(self.ip):
4614 3ecf6786 Iustin Pop
          raise errors.OpPrereqError("Invalid IP address '%s'." % self.ip)
4615 a8083063 Iustin Pop
    else:
4616 a8083063 Iustin Pop
      self.do_ip = False
4617 ecb215b5 Michael Hanselmann
    self.do_bridge = (self.bridge is not None)
4618 1862d460 Alexander Schreiber
    if self.mac is not None:
4619 1862d460 Alexander Schreiber
      if self.cfg.IsMacInUse(self.mac):
4620 1862d460 Alexander Schreiber
        raise errors.OpPrereqError('MAC address %s already in use in cluster' %
4621 1862d460 Alexander Schreiber
                                   self.mac)
4622 1862d460 Alexander Schreiber
      if not utils.IsValidMac(self.mac):
4623 1862d460 Alexander Schreiber
        raise errors.OpPrereqError('Invalid MAC address %s' % self.mac)
4624 a8083063 Iustin Pop
4625 74409b12 Iustin Pop
    # checking the new params on the primary/secondary nodes
4626 31a853d2 Iustin Pop
4627 cfefe007 Guido Trotter
    instance = self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
4628 1a5c7281 Guido Trotter
    assert self.instance is not None, \
4629 1a5c7281 Guido Trotter
      "Cannot retrieve locked instance %s" % self.op.instance_name
4630 74409b12 Iustin Pop
    pnode = self.instance.primary_node
4631 74409b12 Iustin Pop
    nodelist = [pnode]
4632 74409b12 Iustin Pop
    nodelist.extend(instance.secondary_nodes)
4633 74409b12 Iustin Pop
4634 338e51e8 Iustin Pop
    # hvparams processing
4635 74409b12 Iustin Pop
    if self.op.hvparams:
4636 74409b12 Iustin Pop
      i_hvdict = copy.deepcopy(instance.hvparams)
4637 74409b12 Iustin Pop
      for key, val in self.op.hvparams.iteritems():
4638 74409b12 Iustin Pop
        if val is None:
4639 74409b12 Iustin Pop
          try:
4640 74409b12 Iustin Pop
            del i_hvdict[key]
4641 74409b12 Iustin Pop
          except KeyError:
4642 74409b12 Iustin Pop
            pass
4643 74409b12 Iustin Pop
        else:
4644 74409b12 Iustin Pop
          i_hvdict[key] = val
4645 74409b12 Iustin Pop
      cluster = self.cfg.GetClusterInfo()
4646 74409b12 Iustin Pop
      hv_new = cluster.FillDict(cluster.hvparams[instance.hypervisor],
4647 74409b12 Iustin Pop
                                i_hvdict)
4648 74409b12 Iustin Pop
      # local check
4649 74409b12 Iustin Pop
      hypervisor.GetHypervisor(
4650 74409b12 Iustin Pop
        instance.hypervisor).CheckParameterSyntax(hv_new)
4651 74409b12 Iustin Pop
      _CheckHVParams(self, nodelist, instance.hypervisor, hv_new)
4652 338e51e8 Iustin Pop
      self.hv_new = hv_new # the new actual values
4653 338e51e8 Iustin Pop
      self.hv_inst = i_hvdict # the new dict (without defaults)
4654 338e51e8 Iustin Pop
    else:
4655 338e51e8 Iustin Pop
      self.hv_new = self.hv_inst = {}
4656 338e51e8 Iustin Pop
4657 338e51e8 Iustin Pop
    # beparams processing
4658 338e51e8 Iustin Pop
    if self.op.beparams:
4659 338e51e8 Iustin Pop
      i_bedict = copy.deepcopy(instance.beparams)
4660 338e51e8 Iustin Pop
      for key, val in self.op.beparams.iteritems():
4661 338e51e8 Iustin Pop
        if val is None:
4662 338e51e8 Iustin Pop
          try:
4663 338e51e8 Iustin Pop
            del i_bedict[key]
4664 338e51e8 Iustin Pop
          except KeyError:
4665 338e51e8 Iustin Pop
            pass
4666 338e51e8 Iustin Pop
        else:
4667 338e51e8 Iustin Pop
          i_bedict[key] = val
4668 338e51e8 Iustin Pop
      cluster = self.cfg.GetClusterInfo()
4669 338e51e8 Iustin Pop
      be_new = cluster.FillDict(cluster.beparams[constants.BEGR_DEFAULT],
4670 338e51e8 Iustin Pop
                                i_bedict)
4671 338e51e8 Iustin Pop
      self.be_new = be_new # the new actual values
4672 338e51e8 Iustin Pop
      self.be_inst = i_bedict # the new dict (without defaults)
4673 338e51e8 Iustin Pop
    else:
4674 338e51e8 Iustin Pop
      self.hv_new = self.hv_inst = {}
4675 74409b12 Iustin Pop
4676 cfefe007 Guido Trotter
    self.warn = []
4677 647a5d80 Iustin Pop
4678 338e51e8 Iustin Pop
    if constants.BE_MEMORY in self.op.beparams and not self.force:
4679 647a5d80 Iustin Pop
      mem_check_list = [pnode]
4680 c0f2b229 Iustin Pop
      if be_new[constants.BE_AUTO_BALANCE]:
4681 c0f2b229 Iustin Pop
        # either we changed auto_balance to yes or it was from before
4682 647a5d80 Iustin Pop
        mem_check_list.extend(instance.secondary_nodes)
4683 72737a7f Iustin Pop
      instance_info = self.rpc.call_instance_info(pnode, instance.name,
4684 72737a7f Iustin Pop
                                                  instance.hypervisor)
4685 647a5d80 Iustin Pop
      nodeinfo = self.rpc.call_node_info(mem_check_list, self.cfg.GetVGName(),
4686 72737a7f Iustin Pop
                                         instance.hypervisor)
4687 cfefe007 Guido Trotter
4688 cfefe007 Guido Trotter
      if pnode not in nodeinfo or not isinstance(nodeinfo[pnode], dict):
4689 cfefe007 Guido Trotter
        # Assume the primary node is unreachable and go ahead
4690 cfefe007 Guido Trotter
        self.warn.append("Can't get info from primary node %s" % pnode)
4691 cfefe007 Guido Trotter
      else:
4692 cfefe007 Guido Trotter
        if instance_info:
4693 cfefe007 Guido Trotter
          current_mem = instance_info['memory']
4694 cfefe007 Guido Trotter
        else:
4695 cfefe007 Guido Trotter
          # Assume instance not running
4696 cfefe007 Guido Trotter
          # (there is a slight race condition here, but it's not very probable,
4697 cfefe007 Guido Trotter
          # and we have no other way to check)
4698 cfefe007 Guido Trotter
          current_mem = 0
4699 338e51e8 Iustin Pop
        miss_mem = (be_new[constants.BE_MEMORY] - current_mem -
4700 338e51e8 Iustin Pop
                    nodeinfo[pnode]['memory_free'])
4701 cfefe007 Guido Trotter
        if miss_mem > 0:
4702 cfefe007 Guido Trotter
          raise errors.OpPrereqError("This change will prevent the instance"
4703 cfefe007 Guido Trotter
                                     " from starting, due to %d MB of memory"
4704 cfefe007 Guido Trotter
                                     " missing on its primary node" % miss_mem)
4705 cfefe007 Guido Trotter
4706 c0f2b229 Iustin Pop
      if be_new[constants.BE_AUTO_BALANCE]:
4707 647a5d80 Iustin Pop
        for node in instance.secondary_nodes:
4708 647a5d80 Iustin Pop
          if node not in nodeinfo or not isinstance(nodeinfo[node], dict):
4709 647a5d80 Iustin Pop
            self.warn.append("Can't get info from secondary node %s" % node)
4710 647a5d80 Iustin Pop
          elif be_new[constants.BE_MEMORY] > nodeinfo[node]['memory_free']:
4711 647a5d80 Iustin Pop
            self.warn.append("Not enough memory to failover instance to"
4712 647a5d80 Iustin Pop
                             " secondary node %s" % node)
4713 5bc84f33 Alexander Schreiber
4714 a8083063 Iustin Pop
    return
4715 a8083063 Iustin Pop
4716 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
4717 a8083063 Iustin Pop
    """Modifies an instance.
4718 a8083063 Iustin Pop

4719 a8083063 Iustin Pop
    All parameters take effect only at the next restart of the instance.
4720 a8083063 Iustin Pop
    """
4721 cfefe007 Guido Trotter
    # Process here the warnings from CheckPrereq, as we don't have a
4722 cfefe007 Guido Trotter
    # feedback_fn there.
4723 cfefe007 Guido Trotter
    for warn in self.warn:
4724 cfefe007 Guido Trotter
      feedback_fn("WARNING: %s" % warn)
4725 cfefe007 Guido Trotter
4726 a8083063 Iustin Pop
    result = []
4727 a8083063 Iustin Pop
    instance = self.instance
4728 a8083063 Iustin Pop
    if self.do_ip:
4729 a8083063 Iustin Pop
      instance.nics[0].ip = self.ip
4730 a8083063 Iustin Pop
      result.append(("ip", self.ip))
4731 a8083063 Iustin Pop
    if self.bridge:
4732 a8083063 Iustin Pop
      instance.nics[0].bridge = self.bridge
4733 a8083063 Iustin Pop
      result.append(("bridge", self.bridge))
4734 1862d460 Alexander Schreiber
    if self.mac:
4735 1862d460 Alexander Schreiber
      instance.nics[0].mac = self.mac
4736 1862d460 Alexander Schreiber
      result.append(("mac", self.mac))
4737 74409b12 Iustin Pop
    if self.op.hvparams:
4738 74409b12 Iustin Pop
      instance.hvparams = self.hv_new
4739 74409b12 Iustin Pop
      for key, val in self.op.hvparams.iteritems():
4740 74409b12 Iustin Pop
        result.append(("hv/%s" % key, val))
4741 338e51e8 Iustin Pop
    if self.op.beparams:
4742 338e51e8 Iustin Pop
      instance.beparams = self.be_inst
4743 338e51e8 Iustin Pop
      for key, val in self.op.beparams.iteritems():
4744 338e51e8 Iustin Pop
        result.append(("be/%s" % key, val))
4745 a8083063 Iustin Pop
4746 ea94e1cd Guido Trotter
    self.cfg.Update(instance)
4747 a8083063 Iustin Pop
4748 a8083063 Iustin Pop
    return result
4749 a8083063 Iustin Pop
4750 a8083063 Iustin Pop
4751 a8083063 Iustin Pop
class LUQueryExports(NoHooksLU):
  """Query the per-node list of existing exports.

  """
  _OP_REQP = ['nodes']
  REQ_BGL = False

  def ExpandNames(self):
    # we only read the export lists, so shared node locks are enough
    self.needed_locks = {}
    self.share_locks[locking.LEVEL_NODE] = 1
    if self.op.nodes:
      self.needed_locks[locking.LEVEL_NODE] = \
        _GetWantedNodes(self, self.op.nodes)
    else:
      # no explicit node list: query every node in the cluster
      self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET

  def CheckPrereq(self):
    """Check prerequisites.

    """
    # node names were already validated while acquiring the locks
    self.nodes = self.acquired_locks[locking.LEVEL_NODE]

  def Exec(self, feedback_fn):
    """Compute the list of all the exported system images.

    Returns:
      a dictionary with the structure node->(export-list)
      where export-list is a list of the instances exported on
      that node.

    """
    return self.rpc.call_export_list(self.nodes)
4783 a8083063 Iustin Pop
4784 a8083063 Iustin Pop
4785 a8083063 Iustin Pop
class LUExportInstance(LogicalUnit):
  """Export an instance to an image in the cluster.

  """
  HPATH = "instance-export"
  HTYPE = constants.HTYPE_INSTANCE
  _OP_REQP = ["instance_name", "target_node", "shutdown"]
  REQ_BGL = False

  def ExpandNames(self):
    self._ExpandAndLockInstance()
    # FIXME: lock only instance primary and destination node
    #
    # Sad but true, for now we have do lock all nodes, as we don't know where
    # the previous export might be, and and in this LU we search for it and
    # remove it from its current node. In the future we could fix this by:
    #  - making a tasklet to search (share-lock all), then create the new one,
    #    then one to remove, after
    #  - removing the removal operation altoghether
    self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET

  def DeclareLocks(self, level):
    """Last minute lock declaration."""
    # All nodes are locked anyway, so nothing to do here.

  def BuildHooksEnv(self):
    """Build hooks env.

    This will run on the master, primary node and target node.

    """
    env = {
      "EXPORT_NODE": self.op.target_node,
      "EXPORT_DO_SHUTDOWN": self.op.shutdown,
      }
    env.update(_BuildInstanceHookEnvByObject(self, self.instance))
    nl = [self.cfg.GetMasterNode(), self.instance.primary_node,
          self.op.target_node]
    return env, nl, nl

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance and node names are valid.

    """
    instance_name = self.op.instance_name
    # the instance was locked in ExpandNames, so it must exist
    self.instance = self.cfg.GetInstanceInfo(instance_name)
    assert self.instance is not None, \
          "Cannot retrieve locked instance %s" % self.op.instance_name

    self.dst_node = self.cfg.GetNodeInfo(
      self.cfg.ExpandNodeName(self.op.target_node))

    assert self.dst_node is not None, \
          "Cannot retrieve locked node %s" % self.op.target_node

    # instance disk type verification
    for disk in self.instance.disks:
      if disk.dev_type == constants.LD_FILE:
        raise errors.OpPrereqError("Export not supported for instances with"
                                   " file-based disks")

  def Exec(self, feedback_fn):
    """Export an instance to an image in the cluster.

    """
    instance = self.instance
    dst_node = self.dst_node
    src_node = instance.primary_node
    if self.op.shutdown:
      # shutdown the instance, but not the disks
      if not self.rpc.call_instance_shutdown(src_node, instance):
        raise errors.OpExecError("Could not shutdown instance %s on node %s" %
                                 (instance.name, src_node))

    vgname = self.cfg.GetVGName()

    # snapshot devices successfully created on the source node
    snap_disks = []

    try:
      # NOTE: only the "sda" disk is snapshotted and exported here;
      # other disks of the instance are not included in the export
      for disk in instance.disks:
        if disk.iv_name == "sda":
          # new_dev_name will be a snapshot of an lvm leaf of the one we passed
          new_dev_name = self.rpc.call_blockdev_snapshot(src_node, disk)

          if not new_dev_name:
            # snapshot failure is logged but does not abort the export
            logger.Error("could not snapshot block device %s on node %s" %
                         (disk.logical_id[1], src_node))
          else:
            new_dev = objects.Disk(dev_type=constants.LD_LV, size=disk.size,
                                      logical_id=(vgname, new_dev_name),
                                      physical_id=(vgname, new_dev_name),
                                      iv_name=disk.iv_name)
            snap_disks.append(new_dev)

    finally:
      # restart the instance only if we shut it down above and it was
      # marked as running in the configuration
      if self.op.shutdown and instance.status == "up":
        if not self.rpc.call_instance_start(src_node, instance, None):
          _ShutdownInstanceDisks(self, instance)
          raise errors.OpExecError("Could not start instance")

    # TODO: check for size

    cluster_name = self.cfg.GetClusterName()
    # copy each snapshot to the target node, then drop it from the source;
    # failures are logged but the export continues best-effort
    for dev in snap_disks:
      if not self.rpc.call_snapshot_export(src_node, dev, dst_node.name,
                                      instance, cluster_name):
        logger.Error("could not export block device %s from node %s to node %s"
                     % (dev.logical_id[1], src_node, dst_node.name))
      if not self.rpc.call_blockdev_remove(src_node, dev):
        logger.Error("could not remove snapshot block device %s from node %s" %
                     (dev.logical_id[1], src_node))

    if not self.rpc.call_finalize_export(dst_node.name, instance, snap_disks):
      logger.Error("could not finalize export for instance %s on node %s" %
                   (instance.name, dst_node.name))

    nodelist = self.cfg.GetNodeList()
    nodelist.remove(dst_node.name)

    # on one-node clusters nodelist will be empty after the removal
    # if we proceed the backup would be removed because OpQueryExports
    # substitutes an empty list with the full cluster node list.
    if nodelist:
      exportlist = self.rpc.call_export_list(nodelist)
      for node in exportlist:
        if instance.name in exportlist[node]:
          if not self.rpc.call_export_remove(node, instance.name):
            logger.Error("could not remove older export for instance %s"
                         " on node %s" % (instance.name, node))
4916 5c947f38 Iustin Pop
4917 5c947f38 Iustin Pop
4918 9ac99fda Guido Trotter
class LURemoveExport(NoHooksLU):
  """Remove exports related to the named instance.

  """
  _OP_REQP = ["instance_name"]
  REQ_BGL = False

  def ExpandNames(self):
    self.needed_locks = {}
    # We need all nodes to be locked in order for RemoveExport to work, but we
    # don't need to lock the instance itself, as nothing will happen to it (and
    # we can remove exports also for a removed instance)
    self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET

  def CheckPrereq(self):
    """Check prerequisites.
    """
    pass

  def Exec(self, feedback_fn):
    """Remove any export.

    """
    instance_name = self.cfg.ExpandInstanceName(self.op.instance_name)
    # If the instance was not found we'll try with the name that was passed in.
    # This will only work if it was an FQDN, though.
    fqdn_warn = instance_name is None or not instance_name
    if fqdn_warn:
      instance_name = self.op.instance_name

    locked_nodes = self.acquired_locks[locking.LEVEL_NODE]
    exportlist = self.rpc.call_export_list(locked_nodes)
    found = False
    for node in exportlist:
      if instance_name not in exportlist[node]:
        continue
      found = True
      if not self.rpc.call_export_remove(node, instance_name):
        logger.Error("could not remove export for instance %s"
                     " on node %s" % (instance_name, node))

    if fqdn_warn and not found:
      feedback_fn("Export not found. If trying to remove an export belonging"
                  " to a deleted instance please use its Fully Qualified"
                  " Domain Name.")
4963 9ac99fda Guido Trotter
4964 9ac99fda Guido Trotter
4965 5c947f38 Iustin Pop
class TagsLU(NoHooksLU):
  """Generic tags LU.

  This is an abstract class which is the parent of all the other tags LUs.

  """

  def ExpandNames(self):
    self.needed_locks = {}
    kind = self.op.kind
    if kind == constants.TAG_NODE:
      expanded = self.cfg.ExpandNodeName(self.op.name)
      if expanded is None:
        raise errors.OpPrereqError("Invalid node name (%s)" %
                                   (self.op.name,))
      self.op.name = expanded
      self.needed_locks[locking.LEVEL_NODE] = expanded
    elif kind == constants.TAG_INSTANCE:
      expanded = self.cfg.ExpandInstanceName(self.op.name)
      if expanded is None:
        raise errors.OpPrereqError("Invalid instance name (%s)" %
                                   (self.op.name,))
      self.op.name = expanded
      self.needed_locks[locking.LEVEL_INSTANCE] = expanded
    # cluster tags need no locks at all

  def CheckPrereq(self):
    """Check prerequisites.

    """
    # resolve the target object whose tags we operate on
    kind = self.op.kind
    if kind == constants.TAG_CLUSTER:
      self.target = self.cfg.GetClusterInfo()
    elif kind == constants.TAG_NODE:
      self.target = self.cfg.GetNodeInfo(self.op.name)
    elif kind == constants.TAG_INSTANCE:
      self.target = self.cfg.GetInstanceInfo(self.op.name)
    else:
      raise errors.OpPrereqError("Wrong tag type requested (%s)" %
                                 str(self.op.kind))
5002 5c947f38 Iustin Pop
5003 5c947f38 Iustin Pop
5004 5c947f38 Iustin Pop
class LUGetTags(TagsLU):
  """Returns the tags of a given object.

  """
  _OP_REQP = ["kind", "name"]
  REQ_BGL = False

  def Exec(self, feedback_fn):
    """Return the tag list of the target object.

    """
    # self.target was resolved by TagsLU.CheckPrereq
    tags = self.target.GetTags()
    return list(tags)
5016 5c947f38 Iustin Pop
5017 5c947f38 Iustin Pop
5018 73415719 Iustin Pop
class LUSearchTags(NoHooksLU):
5019 73415719 Iustin Pop
  """Searches the tags for a given pattern.
5020 73415719 Iustin Pop

5021 73415719 Iustin Pop
  """
5022 73415719 Iustin Pop
  _OP_REQP = ["pattern"]
5023 8646adce Guido Trotter
  REQ_BGL = False
5024 8646adce Guido Trotter
5025 8646adce Guido Trotter
  def ExpandNames(self):
5026 8646adce Guido Trotter
    self.needed_locks = {}
5027 73415719 Iustin Pop
5028 73415719 Iustin Pop
  def CheckPrereq(self):
5029 73415719 Iustin Pop
    """Check prerequisites.
5030 73415719 Iustin Pop

5031 73415719 Iustin Pop
    This checks the pattern passed for validity by compiling it.
5032 73415719 Iustin Pop

5033 73415719 Iustin Pop
    """
5034 73415719 Iustin Pop
    try:
5035 73415719 Iustin Pop
      self.re = re.compile(self.op.pattern)
5036 73415719 Iustin Pop
    except re.error, err:
5037 73415719 Iustin Pop
      raise errors.OpPrereqError("Invalid search pattern '%s': %s" %
5038 73415719 Iustin Pop
                                 (self.op.pattern, err))
5039 73415719 Iustin Pop
5040 73415719 Iustin Pop
  def Exec(self, feedback_fn):
5041 73415719 Iustin Pop
    """Returns the tag list.
5042 73415719 Iustin Pop

5043 73415719 Iustin Pop
    """
5044 73415719 Iustin Pop
    cfg = self.cfg
5045 73415719 Iustin Pop
    tgts = [("/cluster", cfg.GetClusterInfo())]
5046 8646adce Guido Trotter
    ilist = cfg.GetAllInstancesInfo().values()
5047 73415719 Iustin Pop
    tgts.extend([("/instances/%s" % i.name, i) for i in ilist])
5048 8646adce Guido Trotter
    nlist = cfg.GetAllNodesInfo().values()
5049 73415719 Iustin Pop
    tgts.extend([("/nodes/%s" % n.name, n) for n in nlist])
5050 73415719 Iustin Pop
    results = []
5051 73415719 Iustin Pop
    for path, target in tgts:
5052 73415719 Iustin Pop
      for tag in target.GetTags():
5053 73415719 Iustin Pop
        if self.re.search(tag):
5054 73415719 Iustin Pop
          results.append((path, tag))
5055 73415719 Iustin Pop
    return results
5056 73415719 Iustin Pop
5057 73415719 Iustin Pop
5058 f27302fa Iustin Pop
class LUAddTags(TagsLU):
5059 5c947f38 Iustin Pop
  """Sets a tag on a given object.
5060 5c947f38 Iustin Pop

5061 5c947f38 Iustin Pop
  """
5062 f27302fa Iustin Pop
  _OP_REQP = ["kind", "name", "tags"]
5063 8646adce Guido Trotter
  REQ_BGL = False
5064 5c947f38 Iustin Pop
5065 5c947f38 Iustin Pop
  def CheckPrereq(self):
5066 5c947f38 Iustin Pop
    """Check prerequisites.
5067 5c947f38 Iustin Pop

5068 5c947f38 Iustin Pop
    This checks the type and length of the tag name and value.
5069 5c947f38 Iustin Pop

5070 5c947f38 Iustin Pop
    """
5071 5c947f38 Iustin Pop
    TagsLU.CheckPrereq(self)
5072 f27302fa Iustin Pop
    for tag in self.op.tags:
5073 f27302fa Iustin Pop
      objects.TaggableObject.ValidateTag(tag)
5074 5c947f38 Iustin Pop
5075 5c947f38 Iustin Pop
  def Exec(self, feedback_fn):
5076 5c947f38 Iustin Pop
    """Sets the tag.
5077 5c947f38 Iustin Pop

5078 5c947f38 Iustin Pop
    """
5079 5c947f38 Iustin Pop
    try:
5080 f27302fa Iustin Pop
      for tag in self.op.tags:
5081 f27302fa Iustin Pop
        self.target.AddTag(tag)
5082 5c947f38 Iustin Pop
    except errors.TagError, err:
5083 3ecf6786 Iustin Pop
      raise errors.OpExecError("Error while setting tag: %s" % str(err))
5084 5c947f38 Iustin Pop
    try:
5085 5c947f38 Iustin Pop
      self.cfg.Update(self.target)
5086 5c947f38 Iustin Pop
    except errors.ConfigurationError:
5087 3ecf6786 Iustin Pop
      raise errors.OpRetryError("There has been a modification to the"
5088 3ecf6786 Iustin Pop
                                " config file and the operation has been"
5089 3ecf6786 Iustin Pop
                                " aborted. Please retry.")
5090 5c947f38 Iustin Pop
5091 5c947f38 Iustin Pop
5092 f27302fa Iustin Pop
class LUDelTags(TagsLU):
  """Delete a list of tags from a given object.

  """
  _OP_REQP = ["kind", "name", "tags"]
  REQ_BGL = False

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that we have the given tag.

    """
    TagsLU.CheckPrereq(self)
    for tag in self.op.tags:
      objects.TaggableObject.ValidateTag(tag)
    # every requested tag must currently be present on the target
    wanted = frozenset(self.op.tags)
    present = self.target.GetTags()
    missing = wanted - present
    if missing:
      names = sorted("'%s'" % tag for tag in missing)
      raise errors.OpPrereqError("Tag(s) %s not found" %
                                 (",".join(names)))

  def Exec(self, feedback_fn):
    """Remove the tag from the object.

    """
    target = self.target
    for tag in self.op.tags:
      target.RemoveTag(tag)
    # persist the modified object; a concurrent config change aborts us
    try:
      self.cfg.Update(target)
    except errors.ConfigurationError:
      raise errors.OpRetryError("There has been a modification to the"
                                " config file and the operation has been"
                                " aborted. Please retry.")
5129 06009e27 Iustin Pop
5130 0eed6e61 Guido Trotter
5131 06009e27 Iustin Pop
class LUTestDelay(NoHooksLU):
  """Sleep for a specified amount of time.

  This LU sleeps on the master and/or nodes for a specified amount of
  time.

  """
  _OP_REQP = ["duration", "on_master", "on_nodes"]
  REQ_BGL = False

  def ExpandNames(self):
    """Expand names and set required locks.

    This expands the node list, if any.

    """
    self.needed_locks = {}
    if not self.op.on_nodes:
      return
    # _GetWantedNodes can be used here, but is not always appropriate to use
    # this way in ExpandNames. Check LogicalUnit.ExpandNames docstring for
    # more information.
    self.op.on_nodes = _GetWantedNodes(self, self.op.on_nodes)
    self.needed_locks[locking.LEVEL_NODE] = self.op.on_nodes

  def CheckPrereq(self):
    """Check prerequisites.

    """

  def Exec(self, feedback_fn):
    """Do the actual sleep.

    """
    if self.op.on_master:
      # delay on the master itself
      if not utils.TestDelay(self.op.duration):
        raise errors.OpExecError("Error during master delay test")
    if self.op.on_nodes:
      # fan the delay out to the requested nodes via RPC
      result = self.rpc.call_test_delay(self.op.on_nodes, self.op.duration)
      if not result:
        raise errors.OpExecError("Complete failure from rpc call")
      for node, outcome in result.items():
        if not outcome:
          raise errors.OpExecError("Failure during rpc call to node %s,"
                                   " result: %s" % (node, outcome))
5175 d61df03e Iustin Pop
5176 d61df03e Iustin Pop
5177 d1c2dd75 Iustin Pop
class IAllocator(object):
5178 d1c2dd75 Iustin Pop
  """IAllocator framework.
5179 d61df03e Iustin Pop

5180 d1c2dd75 Iustin Pop
  An IAllocator instance has three sets of attributes:
5181 d6a02168 Michael Hanselmann
    - cfg that is needed to query the cluster
5182 d1c2dd75 Iustin Pop
    - input data (all members of the _KEYS class attribute are required)
5183 d1c2dd75 Iustin Pop
    - four buffer attributes (in|out_data|text), that represent the
5184 d1c2dd75 Iustin Pop
      input (to the external script) in text and data structure format,
5185 d1c2dd75 Iustin Pop
      and the output from it, again in two formats
5186 d1c2dd75 Iustin Pop
    - the result variables from the script (success, info, nodes) for
5187 d1c2dd75 Iustin Pop
      easy usage
5188 d61df03e Iustin Pop

5189 d61df03e Iustin Pop
  """
5190 29859cb7 Iustin Pop
  _ALLO_KEYS = [
5191 d1c2dd75 Iustin Pop
    "mem_size", "disks", "disk_template",
5192 d1c2dd75 Iustin Pop
    "os", "tags", "nics", "vcpus",
5193 d1c2dd75 Iustin Pop
    ]
5194 29859cb7 Iustin Pop
  _RELO_KEYS = [
5195 29859cb7 Iustin Pop
    "relocate_from",
5196 29859cb7 Iustin Pop
    ]
5197 d1c2dd75 Iustin Pop
5198 72737a7f Iustin Pop
  def __init__(self, lu, mode, name, **kwargs):
    """Initialize the allocator request and build its input data.

    Depending on mode, the keyword arguments must supply exactly the
    keys of _ALLO_KEYS or _RELO_KEYS.

    """
    self.lu = lu
    # buffer attributes: textual and structured input/output
    self.in_text = self.out_text = self.in_data = self.out_data = None
    # pre-declare every input field so that pylint is happy
    self.mode = mode
    self.name = name
    self.mem_size = self.disks = self.disk_template = None
    self.os = self.tags = self.nics = self.vcpus = None
    self.relocate_from = None
    # computed fields
    self.required_nodes = None
    # result fields
    self.success = self.info = self.nodes = None
    if self.mode == constants.IALLOCATOR_MODE_ALLOC:
      keyset = self._ALLO_KEYS
    elif self.mode == constants.IALLOCATOR_MODE_RELOC:
      keyset = self._RELO_KEYS
    else:
      raise errors.ProgrammerError("Unknown mode '%s' passed to the"
                                   " IAllocator" % self.mode)
    # reject unknown keys, store the known ones on self
    for key, value in kwargs.iteritems():
      if key not in keyset:
        raise errors.ProgrammerError("Invalid input parameter '%s' to"
                                     " IAllocator" % key)
      setattr(self, key, value)
    # all keys of the selected mode must have been provided
    for key in keyset:
      if key not in kwargs:
        raise errors.ProgrammerError("Missing input parameter '%s' to"
                                     " IAllocator" % key)
    self._BuildInputData()
5229 d1c2dd75 Iustin Pop
5230 d1c2dd75 Iustin Pop
  def _ComputeClusterData(self):
5231 d1c2dd75 Iustin Pop
    """Compute the generic allocator input data.
5232 d1c2dd75 Iustin Pop

5233 d1c2dd75 Iustin Pop
    This is the data that is independent of the actual operation.
5234 d1c2dd75 Iustin Pop

5235 d1c2dd75 Iustin Pop
    """
5236 72737a7f Iustin Pop
    cfg = self.lu.cfg
5237 e69d05fd Iustin Pop
    cluster_info = cfg.GetClusterInfo()
5238 d1c2dd75 Iustin Pop
    # cluster data
5239 d1c2dd75 Iustin Pop
    data = {
5240 d1c2dd75 Iustin Pop
      "version": 1,
5241 72737a7f Iustin Pop
      "cluster_name": cfg.GetClusterName(),
5242 e69d05fd Iustin Pop
      "cluster_tags": list(cluster_info.GetTags()),
5243 e69d05fd Iustin Pop
      "enable_hypervisors": list(cluster_info.enabled_hypervisors),
5244 d1c2dd75 Iustin Pop
      # we don't have job IDs
5245 d61df03e Iustin Pop
      }
5246 d61df03e Iustin Pop
5247 338e51e8 Iustin Pop
    i_list = []
5248 338e51e8 Iustin Pop
    cluster = self.cfg.GetClusterInfo()
5249 338e51e8 Iustin Pop
    for iname in cfg.GetInstanceList():
5250 338e51e8 Iustin Pop
      i_obj = cfg.GetInstanceInfo(iname)
5251 338e51e8 Iustin Pop
      i_list.append((i_obj, cluster.FillBE(i_obj)))
5252 6286519f Iustin Pop
5253 d1c2dd75 Iustin Pop
    # node data
5254 d1c2dd75 Iustin Pop
    node_results = {}
5255 d1c2dd75 Iustin Pop
    node_list = cfg.GetNodeList()
5256 e69d05fd Iustin Pop
    # FIXME: here we have only one hypervisor information, but
5257 e69d05fd Iustin Pop
    # instance can belong to different hypervisors
5258 72737a7f Iustin Pop
    node_data = self.lu.rpc.call_node_info(node_list, cfg.GetVGName(),
5259 72737a7f Iustin Pop
                                           cfg.GetHypervisorType())
5260 d1c2dd75 Iustin Pop
    for nname in node_list:
5261 d1c2dd75 Iustin Pop
      ninfo = cfg.GetNodeInfo(nname)
5262 d1c2dd75 Iustin Pop
      if nname not in node_data or not isinstance(node_data[nname], dict):
5263 d1c2dd75 Iustin Pop
        raise errors.OpExecError("Can't get data for node %s" % nname)
5264 d1c2dd75 Iustin Pop
      remote_info = node_data[nname]
5265 b2662e7f Iustin Pop
      for attr in ['memory_total', 'memory_free', 'memory_dom0',
5266 4337cf1b Iustin Pop
                   'vg_size', 'vg_free', 'cpu_total']:
5267 d1c2dd75 Iustin Pop
        if attr not in remote_info:
5268 d1c2dd75 Iustin Pop
          raise errors.OpExecError("Node '%s' didn't return attribute '%s'" %
5269 d1c2dd75 Iustin Pop
                                   (nname, attr))
5270 d1c2dd75 Iustin Pop
        try:
5271 b2662e7f Iustin Pop
          remote_info[attr] = int(remote_info[attr])
5272 d1c2dd75 Iustin Pop
        except ValueError, err:
5273 d1c2dd75 Iustin Pop
          raise errors.OpExecError("Node '%s' returned invalid value for '%s':"
5274 d1c2dd75 Iustin Pop
                                   " %s" % (nname, attr, str(err)))
5275 6286519f Iustin Pop
      # compute memory used by primary instances
5276 6286519f Iustin Pop
      i_p_mem = i_p_up_mem = 0
5277 338e51e8 Iustin Pop
      for iinfo, beinfo in i_list:
5278 6286519f Iustin Pop
        if iinfo.primary_node == nname:
5279 338e51e8 Iustin Pop
          i_p_mem += beinfo[constants.BE_MEMORY]
5280 6286519f Iustin Pop
          if iinfo.status == "up":
5281 338e51e8 Iustin Pop
            i_p_up_mem += beinfo[constants.BE_MEMORY]
5282 6286519f Iustin Pop
5283 b2662e7f Iustin Pop
      # compute memory used by instances
5284 d1c2dd75 Iustin Pop
      pnr = {
5285 d1c2dd75 Iustin Pop
        "tags": list(ninfo.GetTags()),
5286 b2662e7f Iustin Pop
        "total_memory": remote_info['memory_total'],
5287 b2662e7f Iustin Pop
        "reserved_memory": remote_info['memory_dom0'],
5288 b2662e7f Iustin Pop
        "free_memory": remote_info['memory_free'],
5289 6286519f Iustin Pop
        "i_pri_memory": i_p_mem,
5290 6286519f Iustin Pop
        "i_pri_up_memory": i_p_up_mem,
5291 b2662e7f Iustin Pop
        "total_disk": remote_info['vg_size'],
5292 b2662e7f Iustin Pop
        "free_disk": remote_info['vg_free'],
5293 d1c2dd75 Iustin Pop
        "primary_ip": ninfo.primary_ip,
5294 d1c2dd75 Iustin Pop
        "secondary_ip": ninfo.secondary_ip,
5295 4337cf1b Iustin Pop
        "total_cpus": remote_info['cpu_total'],
5296 d1c2dd75 Iustin Pop
        }
5297 d1c2dd75 Iustin Pop
      node_results[nname] = pnr
5298 d1c2dd75 Iustin Pop
    data["nodes"] = node_results
5299 d1c2dd75 Iustin Pop
5300 d1c2dd75 Iustin Pop
    # instance data
5301 d1c2dd75 Iustin Pop
    instance_data = {}
5302 338e51e8 Iustin Pop
    for iinfo, beinfo in i_list:
5303 d1c2dd75 Iustin Pop
      nic_data = [{"mac": n.mac, "ip": n.ip, "bridge": n.bridge}
5304 d1c2dd75 Iustin Pop
                  for n in iinfo.nics]
5305 d1c2dd75 Iustin Pop
      pir = {
5306 d1c2dd75 Iustin Pop
        "tags": list(iinfo.GetTags()),
5307 d1c2dd75 Iustin Pop
        "should_run": iinfo.status == "up",
5308 338e51e8 Iustin Pop
        "vcpus": beinfo[constants.BE_VCPUS],
5309 338e51e8 Iustin Pop
        "memory": beinfo[constants.BE_MEMORY],
5310 d1c2dd75 Iustin Pop
        "os": iinfo.os,
5311 d1c2dd75 Iustin Pop
        "nodes": [iinfo.primary_node] + list(iinfo.secondary_nodes),
5312 d1c2dd75 Iustin Pop
        "nics": nic_data,
5313 d1c2dd75 Iustin Pop
        "disks": [{"size": dsk.size, "mode": "w"} for dsk in iinfo.disks],
5314 d1c2dd75 Iustin Pop
        "disk_template": iinfo.disk_template,
5315 e69d05fd Iustin Pop
        "hypervisor": iinfo.hypervisor,
5316 d1c2dd75 Iustin Pop
        }
5317 768f0a80 Iustin Pop
      instance_data[iinfo.name] = pir
5318 d61df03e Iustin Pop
5319 d1c2dd75 Iustin Pop
    data["instances"] = instance_data
5320 d61df03e Iustin Pop
5321 d1c2dd75 Iustin Pop
    self.in_data = data
5322 d61df03e Iustin Pop
5323 d1c2dd75 Iustin Pop
  def _AddNewInstance(self):
    """Attach an "allocate" request to the allocator input structure.

    This in combination with _ComputeClusterData builds the complete
    input structure needed by the instance allocator.

    The checks for the completeness of the opcode must have already been
    done.

    """
    if len(self.disks) != 2:
      raise errors.OpExecError("Only two-disk configurations supported")

    disk_space = _ComputeDiskSize(self.disk_template,
                                  self.disks[0]["size"], self.disks[1]["size"])

    # network-mirrored templates need a secondary node as well
    if self.disk_template in constants.DTS_NET_MIRROR:
      self.required_nodes = 2
    else:
      self.required_nodes = 1

    self.in_data["request"] = {
      "type": "allocate",
      "name": self.name,
      "disk_template": self.disk_template,
      "tags": self.tags,
      "os": self.os,
      "vcpus": self.vcpus,
      "memory": self.mem_size,
      "disks": self.disks,
      "disk_space_total": disk_space,
      "nics": self.nics,
      "required_nodes": self.required_nodes,
      }
5358 298fe380 Iustin Pop
5359 d1c2dd75 Iustin Pop
  def _AddRelocateInstance(self):
    """Add relocate instance data to allocator structure.

    This in combination with _ComputeClusterData will create the
    correct structure needed as input for the allocator.

    The checks for the completeness of the opcode must have already been
    done.

    """
    instance = self.lu.cfg.GetInstanceInfo(self.name)
    if instance is None:
      raise errors.ProgrammerError("Unknown instance '%s' passed to"
                                   " IAllocator" % self.name)

    if instance.disk_template not in constants.DTS_NET_MIRROR:
      raise errors.OpPrereqError("Can't relocate non-mirrored instances")

    if len(instance.secondary_nodes) != 1:
      raise errors.OpPrereqError("Instance has not exactly one secondary node")

    self.required_nodes = 1

    # _ComputeDiskSize handles exactly two disks; fail with a clear error
    # (same check as in _AddNewInstance) instead of an IndexError below
    if len(instance.disks) != 2:
      raise errors.OpExecError("Only two-disk configurations supported")

    disk_space = _ComputeDiskSize(instance.disk_template,
                                  instance.disks[0].size,
                                  instance.disks[1].size)

    request = {
      "type": "relocate",
      "name": self.name,
      "disk_space_total": disk_space,
      "required_nodes": self.required_nodes,
      "relocate_from": self.relocate_from,
      }
    self.in_data["request"] = request
5394 d61df03e Iustin Pop
5395 d1c2dd75 Iustin Pop
  def _BuildInputData(self):
    """Build and serialize the allocator input data.

    Gathers the cluster-wide data, attaches the request matching the
    current mode and stores the serialized text in self.in_text.

    """
    self._ComputeClusterData()

    # attach the mode-specific request to self.in_data
    if self.mode == constants.IALLOCATOR_MODE_ALLOC:
      request_fn = self._AddNewInstance
    else:
      request_fn = self._AddRelocateInstance
    request_fn()

    self.in_text = serializer.Dump(self.in_data)
5407 d61df03e Iustin Pop
5408 72737a7f Iustin Pop
  def Run(self, name, validate=True, call_fn=None):
    """Run an instance allocator and return the results.

    The allocator named `name` is executed on the master node with the
    previously-built input text; its standard output is stored in
    self.out_text and, if `validate` is True, parsed via _ValidateResult.

    `call_fn` may override the RPC used to run the allocator (defaults
    to the iallocator runner RPC); this is mainly useful for tests.

    Raises errors.OpExecError if the runner returns a malformed result,
    the allocator script is not found, or the allocator itself fails.

    """
    if call_fn is None:
      call_fn = self.lu.rpc.call_iallocator_runner

    result = call_fn(self.lu.cfg.GetMasterNode(), name, self.in_text)

    # the runner must return a 4-element (rcode, stdout, stderr, fail) tuple
    if not isinstance(result, (list, tuple)) or len(result) != 4:
      raise errors.OpExecError("Invalid result from master iallocator runner")

    rcode, stdout, stderr, fail = result

    if rcode == constants.IARUN_NOTFOUND:
      raise errors.OpExecError("Can't find allocator '%s'" % name)
    elif rcode == constants.IARUN_FAILURE:
      raise errors.OpExecError("Instance allocator call failed: %s,"
                               " output: %s" % (fail, stdout+stderr))
    self.out_text = stdout
    if validate:
      self._ValidateResult()
5431 298fe380 Iustin Pop
5432 d1c2dd75 Iustin Pop
  def _ValidateResult(self):
5433 d1c2dd75 Iustin Pop
    """Process the allocator results.
5434 538475ca Iustin Pop

5435 d1c2dd75 Iustin Pop
    This will process and if successful save the result in
5436 d1c2dd75 Iustin Pop
    self.out_data and the other parameters.
5437 538475ca Iustin Pop

5438 d1c2dd75 Iustin Pop
    """
5439 d1c2dd75 Iustin Pop
    try:
5440 d1c2dd75 Iustin Pop
      rdict = serializer.Load(self.out_text)
5441 d1c2dd75 Iustin Pop
    except Exception, err:
5442 d1c2dd75 Iustin Pop
      raise errors.OpExecError("Can't parse iallocator results: %s" % str(err))
5443 d1c2dd75 Iustin Pop
5444 d1c2dd75 Iustin Pop
    if not isinstance(rdict, dict):
5445 d1c2dd75 Iustin Pop
      raise errors.OpExecError("Can't parse iallocator results: not a dict")
5446 538475ca Iustin Pop
5447 d1c2dd75 Iustin Pop
    for key in "success", "info", "nodes":
5448 d1c2dd75 Iustin Pop
      if key not in rdict:
5449 d1c2dd75 Iustin Pop
        raise errors.OpExecError("Can't parse iallocator results:"
5450 d1c2dd75 Iustin Pop
                                 " missing key '%s'" % key)
5451 d1c2dd75 Iustin Pop
      setattr(self, key, rdict[key])
5452 538475ca Iustin Pop
5453 d1c2dd75 Iustin Pop
    if not isinstance(rdict["nodes"], list):
5454 d1c2dd75 Iustin Pop
      raise errors.OpExecError("Can't parse iallocator results: 'nodes' key"
5455 d1c2dd75 Iustin Pop
                               " is not a list")
5456 d1c2dd75 Iustin Pop
    self.out_data = rdict
5457 538475ca Iustin Pop
5458 538475ca Iustin Pop
5459 d61df03e Iustin Pop
class LUTestAllocator(NoHooksLU):
  """Run allocator tests.

  This LU exercises the instance allocator framework without changing
  the cluster state.

  """
  _OP_REQP = ["direction", "mode", "name"]

  def CheckPrereq(self):
    """Check prerequisites.

    This checks the opcode parameters depending on the direction and
    mode of the test.

    """
    if self.op.mode == constants.IALLOCATOR_MODE_ALLOC:
      # allocation mode needs the full instance specification
      for attr in ["name", "mem_size", "disks", "disk_template",
                   "os", "tags", "nics", "vcpus"]:
        if not hasattr(self.op, attr):
          raise errors.OpPrereqError("Missing attribute '%s' on opcode input" %
                                     attr)
      iname = self.cfg.ExpandInstanceName(self.op.name)
      if iname is not None:
        raise errors.OpPrereqError("Instance '%s' already in the cluster" %
                                   iname)
      if not isinstance(self.op.nics, list):
        raise errors.OpPrereqError("Invalid parameter 'nics'")
      for nic in self.op.nics:
        nic_ok = (isinstance(nic, dict) and
                  "mac" in nic and
                  "ip" in nic and
                  "bridge" in nic)
        if not nic_ok:
          raise errors.OpPrereqError("Invalid contents of the"
                                     " 'nics' parameter")
      if not isinstance(self.op.disks, list):
        raise errors.OpPrereqError("Invalid parameter 'disks'")
      if len(self.op.disks) != 2:
        raise errors.OpPrereqError("Only two-disk configurations supported")
      for disk in self.op.disks:
        disk_ok = (isinstance(disk, dict) and
                   "size" in disk and
                   isinstance(disk["size"], int) and
                   "mode" in disk and
                   disk["mode"] in ['r', 'w'])
        if not disk_ok:
          raise errors.OpPrereqError("Invalid contents of the"
                                     " 'disks' parameter")
    elif self.op.mode == constants.IALLOCATOR_MODE_RELOC:
      # relocation mode only needs an existing instance name
      if not hasattr(self.op, "name"):
        raise errors.OpPrereqError("Missing attribute 'name' on opcode input")
      fname = self.cfg.ExpandInstanceName(self.op.name)
      if fname is None:
        raise errors.OpPrereqError("Instance '%s' not found for relocation" %
                                   self.op.name)
      self.op.name = fname
      self.relocate_from = self.cfg.GetInstanceInfo(fname).secondary_nodes
    else:
      raise errors.OpPrereqError("Invalid test allocator mode '%s'" %
                                 self.op.mode)

    # an allocator name is only required when we actually run one
    if self.op.direction == constants.IALLOCATOR_DIR_OUT:
      if getattr(self.op, "allocator", None) is None:
        raise errors.OpPrereqError("Missing allocator name")
    elif self.op.direction != constants.IALLOCATOR_DIR_IN:
      raise errors.OpPrereqError("Wrong allocator test '%s'" %
                                 self.op.direction)

  def Exec(self, feedback_fn):
    """Run the allocator test.

    Depending on the direction, this either returns the serialized
    allocator input or the raw output of the chosen allocator script.

    """
    if self.op.mode == constants.IALLOCATOR_MODE_ALLOC:
      extra_args = {
        "mem_size": self.op.mem_size,
        "disks": self.op.disks,
        "disk_template": self.op.disk_template,
        "os": self.op.os,
        "tags": self.op.tags,
        "nics": self.op.nics,
        "vcpus": self.op.vcpus,
        }
    else:
      extra_args = {
        "relocate_from": list(self.relocate_from),
        }
    ial = IAllocator(self,
                     mode=self.op.mode,
                     name=self.op.name,
                     **extra_args)

    if self.op.direction == constants.IALLOCATOR_DIR_IN:
      return ial.in_text

    ial.Run(self.op.allocator, validate=False)
    return ial.out_text