Statistics
| Branch: | Tag: | Revision:

root / lib / cmdlib.py @ 023e3296

History | View | Annotate | Download (180.9 kB)

1 2f31098c Iustin Pop
#
2 a8083063 Iustin Pop
#
3 a8083063 Iustin Pop
4 e7c6e02b Michael Hanselmann
# Copyright (C) 2006, 2007, 2008 Google Inc.
5 a8083063 Iustin Pop
#
6 a8083063 Iustin Pop
# This program is free software; you can redistribute it and/or modify
7 a8083063 Iustin Pop
# it under the terms of the GNU General Public License as published by
8 a8083063 Iustin Pop
# the Free Software Foundation; either version 2 of the License, or
9 a8083063 Iustin Pop
# (at your option) any later version.
10 a8083063 Iustin Pop
#
11 a8083063 Iustin Pop
# This program is distributed in the hope that it will be useful, but
12 a8083063 Iustin Pop
# WITHOUT ANY WARRANTY; without even the implied warranty of
13 a8083063 Iustin Pop
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
14 a8083063 Iustin Pop
# General Public License for more details.
15 a8083063 Iustin Pop
#
16 a8083063 Iustin Pop
# You should have received a copy of the GNU General Public License
17 a8083063 Iustin Pop
# along with this program; if not, write to the Free Software
18 a8083063 Iustin Pop
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
19 a8083063 Iustin Pop
# 02110-1301, USA.
20 a8083063 Iustin Pop
21 a8083063 Iustin Pop
22 880478f8 Iustin Pop
"""Module implementing the master-side code."""
23 a8083063 Iustin Pop
24 a8083063 Iustin Pop
# pylint: disable-msg=W0613,W0201
25 a8083063 Iustin Pop
26 a8083063 Iustin Pop
import os
27 a8083063 Iustin Pop
import os.path
28 a8083063 Iustin Pop
import sha
29 a8083063 Iustin Pop
import time
30 a8083063 Iustin Pop
import tempfile
31 a8083063 Iustin Pop
import re
32 a8083063 Iustin Pop
import platform
33 a8083063 Iustin Pop
34 a8083063 Iustin Pop
from ganeti import rpc
35 a8083063 Iustin Pop
from ganeti import ssh
36 a8083063 Iustin Pop
from ganeti import logger
37 a8083063 Iustin Pop
from ganeti import utils
38 a8083063 Iustin Pop
from ganeti import errors
39 a8083063 Iustin Pop
from ganeti import hypervisor
40 6048c986 Guido Trotter
from ganeti import locking
41 a8083063 Iustin Pop
from ganeti import constants
42 a8083063 Iustin Pop
from ganeti import objects
43 a8083063 Iustin Pop
from ganeti import opcodes
44 8d14b30d Iustin Pop
from ganeti import serializer
45 d61df03e Iustin Pop
46 d61df03e Iustin Pop
47 a8083063 Iustin Pop
class LogicalUnit(object):
  """Logical Unit base class.

  Subclasses must follow these rules:
    - implement ExpandNames
    - implement CheckPrereq
    - implement Exec
    - implement BuildHooksEnv
    - redefine HPATH and HTYPE
    - optionally redefine their run requirements:
        REQ_MASTER: the LU needs to run on the master node
        REQ_WSSTORE: the LU needs a writable SimpleStore
        REQ_BGL: the LU needs to hold the Big Ganeti Lock exclusively

  Note that all commands require root permissions.

  """
  HPATH = None
  HTYPE = None
  _OP_REQP = []
  REQ_MASTER = True
  REQ_WSSTORE = False
  REQ_BGL = True

  def __init__(self, processor, op, context, sstore):
    """Constructor for LogicalUnit.

    This needs to be overridden in derived classes in order to check op
    validity.

    Raises errors.OpPrereqError if any parameter listed in _OP_REQP is
    missing from the opcode, if the cluster is not yet initialized, or
    if REQ_MASTER is set and we are not running on the master node.

    """
    self.proc = processor
    self.op = op
    self.cfg = context.cfg
    self.sstore = sstore
    self.context = context
    # Filled in by ExpandNames; None means "names not expanded yet"
    self.needed_locks = None
    self.acquired_locks = {}
    # 0 means "acquire exclusively" at that locking level
    self.share_locks = dict(((i, 0) for i in locking.LEVELS))
    # Used to force good behavior when calling helper functions
    self.recalculate_locks = {}
    # Lazily-created SshRunner, exposed through the `ssh` property below
    self.__ssh = None

    for attr_name in self._OP_REQP:
      attr_val = getattr(op, attr_name, None)
      if attr_val is None:
        raise errors.OpPrereqError("Required parameter '%s' missing" %
                                   attr_name)

    if not self.cfg.IsCluster():
      raise errors.OpPrereqError("Cluster not initialized yet,"
                                 " use 'gnt-cluster init' first.")
    if self.REQ_MASTER:
      master = sstore.GetMasterNode()
      if master != utils.HostInfo().name:
        raise errors.OpPrereqError("Commands must be run on the master"
                                   " node %s" % master)

  def __GetSSH(self):
    """Returns the SshRunner object, creating it on first use.

    """
    if not self.__ssh:
      self.__ssh = ssh.SshRunner(self.sstore)
    return self.__ssh

  ssh = property(fget=__GetSSH)

  def ExpandNames(self):
    """Expand names for this LU.

    This method is called before starting to execute the opcode, and it should
    update all the parameters of the opcode to their canonical form (e.g. a
    short node name must be fully expanded after this method has successfully
    completed). This way locking, hooks, logging, ecc. can work correctly.

    LUs which implement this method must also populate the self.needed_locks
    member, as a dict with lock levels as keys, and a list of needed lock names
    as values. Rules:
      - Use an empty dict if you don't need any lock
      - If you don't need any lock at a particular level omit that level
      - Don't put anything for the BGL level
      - If you want all locks at a level use locking.ALL_SET as a value

    If you need to share locks (rather than acquire them exclusively) at one
    level you can modify self.share_locks, setting a true value (usually 1) for
    that level. By default locks are not shared.

    Examples:
    # Acquire all nodes and one instance
    self.needed_locks = {
      locking.LEVEL_NODE: locking.ALL_SET,
      locking.LEVEL_INSTANCE: ['instance1.example.tld'],
    }
    # Acquire just two nodes
    self.needed_locks = {
      locking.LEVEL_NODE: ['node1.example.tld', 'node2.example.tld'],
    }
    # Acquire no locks
    self.needed_locks = {} # No, you can't leave it to the default value None

    """
    # The implementation of this method is mandatory only if the new LU is
    # concurrent, so that old LUs don't need to be changed all at the same
    # time.
    if self.REQ_BGL:
      self.needed_locks = {} # Exclusive LUs don't need locks.
    else:
      raise NotImplementedError

  def DeclareLocks(self, level):
    """Declare LU locking needs for a level

    While most LUs can just declare their locking needs at ExpandNames time,
    sometimes there's the need to calculate some locks after having acquired
    the ones before. This function is called just before acquiring locks at a
    particular level, but after acquiring the ones at lower levels, and permits
    such calculations. It can be used to modify self.needed_locks, and by
    default it does nothing.

    This function is only called if you have something already set in
    self.needed_locks for the level.

    @param level: Locking level which is going to be locked
    @type level: member of ganeti.locking.LEVELS

    """

  def CheckPrereq(self):
    """Check prerequisites for this LU.

    This method should check that the prerequisites for the execution
    of this LU are fulfilled. It can do internode communication, but
    it should be idempotent - no cluster or system changes are
    allowed.

    The method should raise errors.OpPrereqError in case something is
    not fulfilled. Its return value is ignored.

    This method should also update all the parameters of the opcode to
    their canonical form if it hasn't been done by ExpandNames before.

    """
    raise NotImplementedError

  def Exec(self, feedback_fn):
    """Execute the LU.

    This method should implement the actual work. It should raise
    errors.OpExecError for failures that are somewhat dealt with in
    code, or expected.

    """
    raise NotImplementedError

  def BuildHooksEnv(self):
    """Build hooks environment for this LU.

    This method should return a three-node tuple consisting of: a dict
    containing the environment that will be used for running the
    specific hook for this LU, a list of node names on which the hook
    should run before the execution, and a list of node names on which
    the hook should run after the execution.

    The keys of the dict must not have 'GANETI_' prefixed as this will
    be handled in the hooks runner. Also note additional keys will be
    added by the hooks runner. If the LU doesn't define any
    environment, an empty dict (and not None) should be returned.

    No nodes should be returned as an empty list (and not None).

    Note that if the HPATH for a LU class is None, this function will
    not be called.

    """
    raise NotImplementedError

  def HooksCallBack(self, phase, hook_results, feedback_fn, lu_result):
    """Notify the LU about the results of its hooks.

    This method is called every time a hooks phase is executed, and notifies
    the Logical Unit about the hooks' result. The LU can then use it to alter
    its result based on the hooks.  By default the method does nothing and the
    previous result is passed back unchanged but any LU can define it if it
    wants to use the local cluster hook-scripts somehow.

    Args:
      phase: the hooks phase that has just been run
      hook_results: the results of the multi-node hooks rpc call
      feedback_fn: function to send feedback back to the caller
      lu_result: the previous result this LU had, or None in the PRE phase.

    """
    return lu_result

  def _ExpandAndLockInstance(self):
    """Helper function to expand and lock an instance.

    Many LUs that work on an instance take its name in self.op.instance_name
    and need to expand it and then declare the expanded name for locking. This
    function does it, and then updates self.op.instance_name to the expanded
    name. It also initializes needed_locks as a dict, if this hasn't been done
    before.

    """
    if self.needed_locks is None:
      self.needed_locks = {}
    else:
      assert locking.LEVEL_INSTANCE not in self.needed_locks, \
        "_ExpandAndLockInstance called with instance-level locks set"
    expanded_name = self.cfg.ExpandInstanceName(self.op.instance_name)
    if expanded_name is None:
      raise errors.OpPrereqError("Instance '%s' not known" %
                                  self.op.instance_name)
    self.needed_locks[locking.LEVEL_INSTANCE] = expanded_name
    self.op.instance_name = expanded_name

  def _LockInstancesNodes(self, primary_only=False):
    """Helper function to declare instances' nodes for locking.

    This function should be called after locking one or more instances to lock
    their nodes. Its effect is populating self.needed_locks[locking.LEVEL_NODE]
    with all primary or secondary nodes for instances already locked and
    present in self.needed_locks[locking.LEVEL_INSTANCE].

    It should be called from DeclareLocks, and for safety only works if
    self.recalculate_locks[locking.LEVEL_NODE] is set.

    In the future it may grow parameters to just lock some instance's nodes, or
    to just lock primaries or secondary nodes, if needed.

    It should be called in DeclareLocks in a way similar to:

    if level == locking.LEVEL_NODE:
      self._LockInstancesNodes()

    @type primary_only: boolean
    @param primary_only: only lock primary nodes of locked instances

    """
    assert locking.LEVEL_NODE in self.recalculate_locks, \
      "_LockInstancesNodes helper function called with no nodes to recalculate"

    # TODO: check if we've really been called with the instance locks held

    # For now we'll replace self.needed_locks[locking.LEVEL_NODE], but in the
    # future we might want to have different behaviors depending on the value
    # of self.recalculate_locks[locking.LEVEL_NODE]
    wanted_nodes = []
    for instance_name in self.acquired_locks[locking.LEVEL_INSTANCE]:
      instance = self.context.cfg.GetInstanceInfo(instance_name)
      wanted_nodes.append(instance.primary_node)
      if not primary_only:
        wanted_nodes.extend(instance.secondary_nodes)
    self.needed_locks[locking.LEVEL_NODE] = wanted_nodes

    del self.recalculate_locks[locking.LEVEL_NODE]
305 a8083063 Iustin Pop
306 a8083063 Iustin Pop
class NoHooksLU(LogicalUnit):
  """Base class for Logical Units that run no hooks.

  Deriving from this class instead of LogicalUnit saves each hook-less
  LU from repeating the HPATH/HTYPE boilerplate, reducing duplicate
  code.

  """
  HTYPE = None
  HPATH = None
315 a8083063 Iustin Pop
316 a8083063 Iustin Pop
317 dcb93971 Michael Hanselmann
def _GetWantedNodes(lu, nodes):
  """Returns list of checked and expanded node names.

  Args:
    lu: the LogicalUnit on whose behalf we check the nodes
    nodes: non-empty list of node names (strings) to expand

  Raises:
    errors.OpPrereqError: if the argument is not a list or a name does
      not resolve to a known node
    errors.ProgrammerError: if the list is empty; callers wanting "all
      nodes" must not go through this helper

  """
  if not isinstance(nodes, list):
    raise errors.OpPrereqError("Invalid argument type 'nodes'")

  if not nodes:
    raise errors.ProgrammerError("_GetWantedNodes should only be called with a"
      " non-empty list of nodes whose name is to be expanded.")

  wanted = []
  for name in nodes:
    node = lu.cfg.ExpandNodeName(name)
    if node is None:
      raise errors.OpPrereqError("No such node name '%s'" % name)
    wanted.append(node)

  return utils.NiceSort(wanted)
339 3312b702 Iustin Pop
340 3312b702 Iustin Pop
341 3312b702 Iustin Pop
def _GetWantedInstances(lu, instances):
  """Returns list of checked and expanded instance names.

  Args:
    instances: List of instances (strings) or None for all

  An empty (or None) argument means "all instances in the cluster";
  otherwise each name is expanded and must resolve to a known instance.

  """
  if not isinstance(instances, list):
    raise errors.OpPrereqError("Invalid argument type 'instances'")

  if not instances:
    # no explicit selection: every instance in the configuration
    return utils.NiceSort(lu.cfg.GetInstanceList())

  wanted = []
  for name in instances:
    expanded = lu.cfg.ExpandInstanceName(name)
    if expanded is None:
      raise errors.OpPrereqError("No such instance name '%s'" % name)
    wanted.append(expanded)

  return utils.NiceSort(wanted)
363 dcb93971 Michael Hanselmann
364 dcb93971 Michael Hanselmann
365 dcb93971 Michael Hanselmann
def _CheckOutputFields(static, dynamic, selected):
366 83120a01 Michael Hanselmann
  """Checks whether all selected fields are valid.
367 83120a01 Michael Hanselmann

368 83120a01 Michael Hanselmann
  Args:
369 83120a01 Michael Hanselmann
    static: Static fields
370 83120a01 Michael Hanselmann
    dynamic: Dynamic fields
371 83120a01 Michael Hanselmann

372 83120a01 Michael Hanselmann
  """
373 83120a01 Michael Hanselmann
  static_fields = frozenset(static)
374 83120a01 Michael Hanselmann
  dynamic_fields = frozenset(dynamic)
375 dcb93971 Michael Hanselmann
376 83120a01 Michael Hanselmann
  all_fields = static_fields | dynamic_fields
377 dcb93971 Michael Hanselmann
378 83120a01 Michael Hanselmann
  if not all_fields.issuperset(selected):
379 3ecf6786 Iustin Pop
    raise errors.OpPrereqError("Unknown output fields selected: %s"
380 3ecf6786 Iustin Pop
                               % ",".join(frozenset(selected).
381 3ecf6786 Iustin Pop
                                          difference(all_fields)))
382 dcb93971 Michael Hanselmann
383 dcb93971 Michael Hanselmann
384 ecb215b5 Michael Hanselmann
def _BuildInstanceHookEnv(name, primary_node, secondary_nodes, os_type, status,
385 396e1b78 Michael Hanselmann
                          memory, vcpus, nics):
386 ecb215b5 Michael Hanselmann
  """Builds instance related env variables for hooks from single variables.
387 ecb215b5 Michael Hanselmann

388 ecb215b5 Michael Hanselmann
  Args:
389 ecb215b5 Michael Hanselmann
    secondary_nodes: List of secondary nodes as strings
390 396e1b78 Michael Hanselmann
  """
391 396e1b78 Michael Hanselmann
  env = {
392 0e137c28 Iustin Pop
    "OP_TARGET": name,
393 396e1b78 Michael Hanselmann
    "INSTANCE_NAME": name,
394 396e1b78 Michael Hanselmann
    "INSTANCE_PRIMARY": primary_node,
395 396e1b78 Michael Hanselmann
    "INSTANCE_SECONDARIES": " ".join(secondary_nodes),
396 ecb215b5 Michael Hanselmann
    "INSTANCE_OS_TYPE": os_type,
397 396e1b78 Michael Hanselmann
    "INSTANCE_STATUS": status,
398 396e1b78 Michael Hanselmann
    "INSTANCE_MEMORY": memory,
399 396e1b78 Michael Hanselmann
    "INSTANCE_VCPUS": vcpus,
400 396e1b78 Michael Hanselmann
  }
401 396e1b78 Michael Hanselmann
402 396e1b78 Michael Hanselmann
  if nics:
403 396e1b78 Michael Hanselmann
    nic_count = len(nics)
404 53e4e875 Guido Trotter
    for idx, (ip, bridge, mac) in enumerate(nics):
405 396e1b78 Michael Hanselmann
      if ip is None:
406 396e1b78 Michael Hanselmann
        ip = ""
407 396e1b78 Michael Hanselmann
      env["INSTANCE_NIC%d_IP" % idx] = ip
408 396e1b78 Michael Hanselmann
      env["INSTANCE_NIC%d_BRIDGE" % idx] = bridge
409 53e4e875 Guido Trotter
      env["INSTANCE_NIC%d_HWADDR" % idx] = mac
410 396e1b78 Michael Hanselmann
  else:
411 396e1b78 Michael Hanselmann
    nic_count = 0
412 396e1b78 Michael Hanselmann
413 396e1b78 Michael Hanselmann
  env["INSTANCE_NIC_COUNT"] = nic_count
414 396e1b78 Michael Hanselmann
415 396e1b78 Michael Hanselmann
  return env
416 396e1b78 Michael Hanselmann
417 396e1b78 Michael Hanselmann
418 396e1b78 Michael Hanselmann
def _BuildInstanceHookEnvByObject(instance, override=None):
  """Builds instance related env variables for hooks from an object.

  Args:
    instance: objects.Instance object of instance
    override: dict of values to override

  """
  args = {
    'name': instance.name,
    'primary_node': instance.primary_node,
    'secondary_nodes': instance.secondary_nodes,
    'os_type': instance.os,
    # FIX: this previously read instance.os, which exported the OS name
    # (instead of the instance's run status) as INSTANCE_STATUS
    'status': instance.status,
    'memory': instance.memory,
    'vcpus': instance.vcpus,
    'nics': [(nic.ip, nic.bridge, nic.mac) for nic in instance.nics],
  }
  if override:
    args.update(override)
  return _BuildInstanceHookEnv(**args)
438 396e1b78 Michael Hanselmann
439 396e1b78 Michael Hanselmann
440 bf6929a2 Alexander Schreiber
def _CheckInstanceBridgesExist(instance):
  """Check that the bridges needed by an instance exist.

  Args:
    instance: objects.Instance whose primary node is queried

  Raises:
    errors.OpPrereqError: if one or more of the bridges used by the
      instance's NICs is missing on the primary node

  """
  # check bridges existence
  brlist = [nic.bridge for nic in instance.nics]
  if not rpc.call_bridges_exist(instance.primary_node, brlist):
    raise errors.OpPrereqError("one or more target bridges %s does not"
                               " exist on destination node '%s'" %
                               (brlist, instance.primary_node))
450 bf6929a2 Alexander Schreiber
451 bf6929a2 Alexander Schreiber
452 a8083063 Iustin Pop
class LUDestroyCluster(NoHooksLU):
  """Logical unit for destroying the cluster.

  """
  _OP_REQP = []

  def CheckPrereq(self):
    """Check prerequisites.

    Destruction is only permitted on an empty cluster: the master must
    be the sole remaining node and no instances may be defined.

    Any errors are signalled by raising errors.OpPrereqError.

    """
    master_name = self.sstore.GetMasterNode()

    node_names = self.cfg.GetNodeList()
    if node_names != [master_name]:
      raise errors.OpPrereqError("There are still %d node(s) in"
                                 " this cluster." % (len(node_names) - 1))
    instance_names = self.cfg.GetInstanceList()
    if instance_names:
      raise errors.OpPrereqError("There are still %d instance(s) in"
                                 " this cluster." % len(instance_names))

  def Exec(self, feedback_fn):
    """Destroys the cluster.

    Stops the master role on the master node and backs up the ganeti
    run-as user's SSH key pair; returns the (former) master node name.

    """
    master_name = self.sstore.GetMasterNode()
    if not rpc.call_node_stop_master(master_name, False):
      raise errors.OpExecError("Could not disable the master role")
    private_key, public_key, _ = ssh.GetUserFiles(constants.GANETI_RUNAS)
    for key_file in (private_key, public_key):
      utils.CreateBackup(key_file)
    return master_name
488 a8083063 Iustin Pop
489 a8083063 Iustin Pop
490 d8fff41c Guido Trotter
class LUVerifyCluster(LogicalUnit):
  """Verifies the cluster status.

  """
  HPATH = "cluster-verify"
  HTYPE = constants.HTYPE_CLUSTER
  _OP_REQP = ["skip_checks"]

  def _VerifyNode(self, node, file_list, local_cksum, vglist, node_result,
                  remote_version, feedback_fn):
    """Run multiple tests against a node.

    Test list:
      - compares ganeti version
      - checks vg existence and size > 20G
      - checks config file checksum
      - checks ssh to other nodes

    Args:
      node: name of the node to check
      file_list: required list of files
      local_cksum: dictionary of local files and their checksums

    Returns:
      True if any check failed (errors are reported via feedback_fn).

    """
    # compares ganeti version
    local_version = constants.PROTOCOL_VERSION
    if not remote_version:
      feedback_fn("  - ERROR: connection to %s failed" % (node))
      return True

    if local_version != remote_version:
      feedback_fn("  - ERROR: sw version mismatch: master %s, node(%s) %s" %
                      (local_version, node, remote_version))
      return True

    # checks vg existence and size > 20G

    bad = False
    if not vglist:
      feedback_fn("  - ERROR: unable to check volume groups on node %s." %
                      (node,))
      bad = True
    else:
      vgstatus = utils.CheckVolumeGroupSize(vglist, self.cfg.GetVGName(),
                                            constants.MIN_VG_SIZE)
      if vgstatus:
        feedback_fn("  - ERROR: %s on node %s" % (vgstatus, node))
        bad = True

    # checks config file checksum
    # checks ssh to any

    if 'filelist' not in node_result:
      bad = True
      feedback_fn("  - ERROR: node hasn't returned file checksum data")
    else:
      remote_cksum = node_result['filelist']
      for file_name in file_list:
        if file_name not in remote_cksum:
          bad = True
          feedback_fn("  - ERROR: file '%s' missing" % file_name)
        elif remote_cksum[file_name] != local_cksum[file_name]:
          bad = True
          feedback_fn("  - ERROR: file '%s' has wrong checksum" % file_name)

    if 'nodelist' not in node_result:
      bad = True
      feedback_fn("  - ERROR: node hasn't returned node ssh connectivity data")
    else:
      if node_result['nodelist']:
        bad = True
        for node in node_result['nodelist']:
          feedback_fn("  - ERROR: ssh communication with node '%s': %s" %
                          (node, node_result['nodelist'][node]))
    if 'node-net-test' not in node_result:
      bad = True
      feedback_fn("  - ERROR: node hasn't returned node tcp connectivity data")
    else:
      if node_result['node-net-test']:
        bad = True
        nlist = utils.NiceSort(node_result['node-net-test'].keys())
        for node in nlist:
          feedback_fn("  - ERROR: tcp communication with node '%s': %s" %
                          (node, node_result['node-net-test'][node]))

    hyp_result = node_result.get('hypervisor', None)
    if hyp_result is not None:
      feedback_fn("  - ERROR: hypervisor verify failure: '%s'" % hyp_result)
    return bad

  def _VerifyInstance(self, instance, instanceconfig, node_vol_is,
                      node_instance, feedback_fn):
    """Verify an instance.

    This function checks to see if the required block devices are
    available on the instance's node, that the instance runs on its
    primary node (when not marked down), and nowhere else.

    """
    bad = False

    node_current = instanceconfig.primary_node

    node_vol_should = {}
    instanceconfig.MapLVsByNode(node_vol_should)

    for node in node_vol_should:
      for volume in node_vol_should[node]:
        if node not in node_vol_is or volume not in node_vol_is[node]:
          feedback_fn("  - ERROR: volume %s missing on node %s" %
                          (volume, node))
          bad = True

    if not instanceconfig.status == 'down':
      if (node_current not in node_instance or
          not instance in node_instance[node_current]):
        feedback_fn("  - ERROR: instance %s not running on node %s" %
                        (instance, node_current))
        bad = True

    for node in node_instance:
      if (not node == node_current):
        if instance in node_instance[node]:
          feedback_fn("  - ERROR: instance %s should not run on node %s" %
                          (instance, node))
          bad = True

    return bad

  def _VerifyOrphanVolumes(self, node_vol_should, node_vol_is, feedback_fn):
    """Verify if there are any unknown volumes in the cluster.

    The .os, .swap and backup volumes are ignored. All other volumes are
    reported as unknown.

    """
    bad = False

    for node in node_vol_is:
      for volume in node_vol_is[node]:
        if node not in node_vol_should or volume not in node_vol_should[node]:
          feedback_fn("  - ERROR: volume %s on node %s should not exist" %
                      (volume, node))
          bad = True
    return bad

  def _VerifyOrphanInstances(self, instancelist, node_instance, feedback_fn):
    """Verify the list of running instances.

    This checks what instances are running but unknown to the cluster.

    """
    bad = False
    for node in node_instance:
      for runninginstance in node_instance[node]:
        if runninginstance not in instancelist:
          feedback_fn("  - ERROR: instance %s on node %s should not exist" %
                          (runninginstance, node))
          bad = True
    return bad

  def _VerifyNPlusOneMemory(self, node_info, instance_cfg, feedback_fn):
    """Verify N+1 Memory Resilience.

    Check that if one single node dies we can still start all the instances it
    was primary for.

    """
    bad = False

    for node, nodeinfo in node_info.iteritems():
      # This code checks that every node which is now listed as secondary has
      # enough memory to host all instances it is supposed to should a single
      # other node in the cluster fail.
      # FIXME: not ready for failover to an arbitrary node
      # FIXME: does not support file-backed instances
      # WARNING: we currently take into account down instances as well as up
      # ones, considering that even if they're down someone might want to start
      # them even in the event of a node failure.
      for prinode, instances in nodeinfo['sinst-by-pnode'].iteritems():
        needed_mem = 0
        for instance in instances:
          needed_mem += instance_cfg[instance].memory
        if nodeinfo['mfree'] < needed_mem:
          feedback_fn("  - ERROR: not enough memory on node %s to accomodate"
                      " failovers should node %s fail" % (node, prinode))
          bad = True
    return bad

  def CheckPrereq(self):
    """Check prerequisites.

    Transform the list of checks we're going to skip into a set and check that
    all its members are valid.

    """
    self.skip_set = frozenset(self.op.skip_checks)
    if not constants.VERIFY_OPTIONAL_CHECKS.issuperset(self.skip_set):
      raise errors.OpPrereqError("Invalid checks to be skipped specified")

  def BuildHooksEnv(self):
    """Build hooks env.

    Cluster-Verify hooks just run in the post phase and their failure makes
    the output be logged in the verify output and the verification to fail.

    """
    all_nodes = self.cfg.GetNodeList()
    # TODO: populate the environment with useful information for verify hooks
    env = {}
    return env, [], all_nodes

  def Exec(self, feedback_fn):
    """Verify integrity of cluster, performing various test on nodes.

    Returns True when the cluster verifies cleanly, False otherwise.

    """
    bad = False
    feedback_fn("* Verifying global settings")
    for msg in self.cfg.VerifyConfig():
      feedback_fn("  - ERROR: %s" % msg)

    vg_name = self.cfg.GetVGName()
    nodelist = utils.NiceSort(self.cfg.GetNodeList())
    nodeinfo = [self.cfg.GetNodeInfo(nname) for nname in nodelist]
    instancelist = utils.NiceSort(self.cfg.GetInstanceList())
    i_non_redundant = [] # Non redundant instances
    node_volume = {}
    node_instance = {}
    node_info = {}
    instance_cfg = {}

    # FIXME: verify OS list
    # do local checksums
    file_names = list(self.sstore.GetFileList())
    file_names.append(constants.SSL_CERT_FILE)
    file_names.append(constants.CLUSTER_CONF_FILE)
    local_checksums = utils.FingerprintFiles(file_names)

    feedback_fn("* Gathering data (%d nodes)" % len(nodelist))
    all_volumeinfo = rpc.call_volume_list(nodelist, vg_name)
    all_instanceinfo = rpc.call_instance_list(nodelist)
    all_vglist = rpc.call_vg_list(nodelist)
    node_verify_param = {
      'filelist': file_names,
      'nodelist': nodelist,
      'hypervisor': None,
      'node-net-test': [(node.name, node.primary_ip, node.secondary_ip)
                        for node in nodeinfo]
      }
    all_nvinfo = rpc.call_node_verify(nodelist, node_verify_param)
    all_rversion = rpc.call_version(nodelist)
    all_ninfo = rpc.call_node_info(nodelist, self.cfg.GetVGName())

    for node in nodelist:
      feedback_fn("* Verifying node %s" % node)
      result = self._VerifyNode(node, file_names, local_checksums,
                                all_vglist[node], all_nvinfo[node],
                                all_rversion[node], feedback_fn)
      bad = bad or result

      # node_volume
      volumeinfo = all_volumeinfo[node]

      if isinstance(volumeinfo, basestring):
        # a string result from the volume-list RPC is an LVM error message
        feedback_fn("  - ERROR: LVM problem on node %s: %s" %
                    (node, volumeinfo[-400:].encode('string_escape')))
        bad = True
        node_volume[node] = {}
      elif not isinstance(volumeinfo, dict):
        feedback_fn("  - ERROR: connection to %s failed" % (node,))
        bad = True
        continue
      else:
        node_volume[node] = volumeinfo

      # node_instance
      nodeinstance = all_instanceinfo[node]
      if type(nodeinstance) != list:
        feedback_fn("  - ERROR: connection to %s failed" % (node,))
        bad = True
        continue

      node_instance[node] = nodeinstance

      # node_info
      nodeinfo = all_ninfo[node]
      if not isinstance(nodeinfo, dict):
        feedback_fn("  - ERROR: connection to %s failed" % (node,))
        bad = True
        continue

      try:
        node_info[node] = {
          "mfree": int(nodeinfo['memory_free']),
          "dfree": int(nodeinfo['vg_free']),
          "pinst": [],
          "sinst": [],
          # dictionary holding all instances this node is secondary for,
          # grouped by their primary node. Each key is a cluster node, and each
          # value is a list of instances which have the key as primary and the
          # current node as secondary.  this is handy to calculate N+1 memory
          # availability if you can only failover from a primary to its
          # secondary.
          "sinst-by-pnode": {},
        }
      except ValueError:
        feedback_fn("  - ERROR: invalid value returned from node %s" % (node,))
        bad = True
        continue

    node_vol_should = {}

    for instance in instancelist:
      feedback_fn("* Verifying instance %s" % instance)
      inst_config = self.cfg.GetInstanceInfo(instance)
      result = self._VerifyInstance(instance, inst_config, node_volume,
                                    node_instance, feedback_fn)
      bad = bad or result

      inst_config.MapLVsByNode(node_vol_should)

      instance_cfg[instance] = inst_config

      pnode = inst_config.primary_node
      if pnode in node_info:
        node_info[pnode]['pinst'].append(instance)
      else:
        feedback_fn("  - ERROR: instance %s, connection to primary node"
                    " %s failed" % (instance, pnode))
        bad = True

      # If the instance is non-redundant we cannot survive losing its primary
      # node, so we are not N+1 compliant. On the other hand we have no disk
      # templates with more than one secondary so that situation is not well
      # supported either.
      # FIXME: does not support file-backed instances
      if len(inst_config.secondary_nodes) == 0:
        i_non_redundant.append(instance)
      elif len(inst_config.secondary_nodes) > 1:
        feedback_fn("  - WARNING: multiple secondaries for instance %s"
                    % instance)

      for snode in inst_config.secondary_nodes:
        if snode in node_info:
          node_info[snode]['sinst'].append(instance)
          if pnode not in node_info[snode]['sinst-by-pnode']:
            node_info[snode]['sinst-by-pnode'][pnode] = []
          node_info[snode]['sinst-by-pnode'][pnode].append(instance)
        else:
          feedback_fn("  - ERROR: instance %s, connection to secondary node"
                      " %s failed" % (instance, snode))

    feedback_fn("* Verifying orphan volumes")
    result = self._VerifyOrphanVolumes(node_vol_should, node_volume,
                                       feedback_fn)
    bad = bad or result

    feedback_fn("* Verifying remaining instances")
    result = self._VerifyOrphanInstances(instancelist, node_instance,
                                         feedback_fn)
    bad = bad or result

    if constants.VERIFY_NPLUSONE_MEM not in self.skip_set:
      feedback_fn("* Verifying N+1 Memory redundancy")
      result = self._VerifyNPlusOneMemory(node_info, instance_cfg, feedback_fn)
      bad = bad or result

    feedback_fn("* Other Notes")
    if i_non_redundant:
      feedback_fn("  - NOTICE: %d non-redundant instance(s) found."
                  % len(i_non_redundant))

    return not bad

  def HooksCallBack(self, phase, hooks_results, feedback_fn, lu_result):
    """Analyze the post-hooks' result, handle it, and send some
    nicely-formatted feedback back to the user.

    Args:
      phase: the hooks phase that has just been run
      hooks_results: the results of the multi-node hooks rpc call
      feedback_fn: function to send feedback back to the caller
      lu_result: previous Exec result

    """
    # We only really run POST phase hooks, and are only interested in
    # their results
    if phase == constants.HOOKS_PHASE_POST:
      # Used to change hooks' output to proper indentation
      indent_re = re.compile('^', re.M)
      feedback_fn("* Hooks Results")
      if not hooks_results:
        feedback_fn("  - ERROR: general communication failure")
        lu_result = 1
      else:
        for node_name in hooks_results:
          show_node_header = True
          res = hooks_results[node_name]
          if res is False or not isinstance(res, list):
            feedback_fn("    Communication failure")
            lu_result = 1
            continue
          for script, hkr, output in res:
            if hkr == constants.HKR_FAIL:
              # The node header is only shown once, if there are
              # failing hooks on that node
              if show_node_header:
                feedback_fn("  Node %s:" % node_name)
                show_node_header = False
              feedback_fn("    ERROR: Script %s failed, output:" % script)
              output = indent_re.sub('      ', output)
              feedback_fn("%s" % output)
              lu_result = 1

      return lu_result
904 d8fff41c Guido Trotter
905 a8083063 Iustin Pop
906 2c95a8d4 Iustin Pop
class LUVerifyDisks(NoHooksLU):
  """Verifies the cluster disks status.

  """
  _OP_REQP = []

  def CheckPrereq(self):
    """Check prerequisites.

    This has no prerequisites.

    """
    pass

  def Exec(self, feedback_fn):
    """Verify integrity of cluster disks.

    Returns a 4-tuple (unreachable nodes, per-node LVM errors,
    instances with offline LVs, per-instance missing LVs).

    """
    result = res_nodes, res_nlvm, res_instances, res_missing = [], {}, [], {}

    vg_name = self.cfg.GetVGName()
    nodes = utils.NiceSort(self.cfg.GetNodeList())
    instances = [self.cfg.GetInstanceInfo(name)
                 for name in self.cfg.GetInstanceList()]

    nv_dict = {}
    for inst in instances:
      # only running, network-mirrored instances are interesting here
      if (inst.status != "up" or
          inst.disk_template not in constants.DTS_NET_MIRROR):
        continue
      inst_lvs = {}
      inst.MapLVsByNode(inst_lvs)
      # transform { iname: {node: [vol,],},} to {(node, vol): iname}
      for node, vol_list in inst_lvs.iteritems():
        for vol in vol_list:
          nv_dict[(node, vol)] = inst

    if not nv_dict:
      return result

    node_lvs = rpc.call_volume_list(nodes, vg_name)

    for node in nodes:
      # node_volume
      lvs = node_lvs[node]

      if isinstance(lvs, basestring):
        # a string result is an LVM error message; we can't inspect the
        # node's LVs, so record the error and skip the per-LV loop below
        logger.Info("error enumerating LVs on node %s: %s" % (node, lvs))
        res_nlvm[node] = lvs
        continue
      elif not isinstance(lvs, dict):
        logger.Info("connection to node %s failed or invalid data returned" %
                    (node,))
        res_nodes.append(node)
        continue

      for lv_name, (_, lv_inactive, lv_online) in lvs.iteritems():
        inst = nv_dict.pop((node, lv_name), None)
        if (not lv_online and inst is not None
            and inst.name not in res_instances):
          res_instances.append(inst.name)

    # any leftover items in nv_dict are missing LVs, let's arrange the
    # data better
    for key, inst in nv_dict.iteritems():
      if inst.name not in res_missing:
        res_missing[inst.name] = []
      res_missing[inst.name].append(key)

    return result
976 2c95a8d4 Iustin Pop
977 2c95a8d4 Iustin Pop
978 07bd8a51 Iustin Pop
class LURenameCluster(LogicalUnit):
  """Rename the cluster.

  """
  HPATH = "cluster-rename"
  HTYPE = constants.HTYPE_CLUSTER
  _OP_REQP = ["name"]
  REQ_WSSTORE = True

  def BuildHooksEnv(self):
    """Build hooks env.

    """
    env = {
      "OP_TARGET": self.sstore.GetClusterName(),
      "NEW_NAME": self.op.name,
      }
    mn = self.sstore.GetMasterNode()
    return env, [mn], [mn]

  def CheckPrereq(self):
    """Verify that the passed name is a valid one.

    The new name must resolve, must differ from the current name/IP,
    and its IP (when changing) must not already be live on the network.

    """
    hostname = utils.HostInfo(self.op.name)

    new_name = hostname.name
    self.ip = new_ip = hostname.ip
    old_name = self.sstore.GetClusterName()
    old_ip = self.sstore.GetMasterIP()
    if new_name == old_name and new_ip == old_ip:
      raise errors.OpPrereqError("Neither the name nor the IP address of the"
                                 " cluster has changed")
    if new_ip != old_ip:
      if utils.TcpPing(new_ip, constants.DEFAULT_NODED_PORT):
        raise errors.OpPrereqError("The given cluster IP address (%s) is"
                                   " reachable on the network. Aborting." %
                                   new_ip)

    self.op.name = new_name

  def Exec(self, feedback_fn):
    """Rename the cluster.

    """
    clustername = self.op.name
    ip = self.ip
    ss = self.sstore

    # shutdown the master IP
    master = ss.GetMasterNode()
    if not rpc.call_node_stop_master(master, False):
      raise errors.OpExecError("Could not disable the master role")

    try:
      # modify the sstore
      ss.SetKey(ss.SS_MASTER_IP, ip)
      ss.SetKey(ss.SS_CLUSTER_NAME, clustername)

      # Distribute updated ss config to all nodes
      myself = self.cfg.GetNodeInfo(master)
      dist_nodes = self.cfg.GetNodeList()
      if myself.name in dist_nodes:
        dist_nodes.remove(myself.name)

      logger.Debug("Copying updated ssconf data to all nodes")
      for keyname in [ss.SS_CLUSTER_NAME, ss.SS_MASTER_IP]:
        fname = ss.KeyToFilename(keyname)
        result = rpc.call_upload_file(dist_nodes, fname)
        for to_node in dist_nodes:
          if not result[to_node]:
            logger.Error("copy of file %s to node %s failed" %
                         (fname, to_node))
    finally:
      # always try to restore the master role, even on failure above
      if not rpc.call_node_start_master(master, False):
        logger.Error("Could not re-enable the master role on the master,"
                     " please restart manually.")
1055 07bd8a51 Iustin Pop
1056 07bd8a51 Iustin Pop
1057 8084f9f6 Manuel Franceschini
def _RecursiveCheckIfLVMBased(disk):
  """Check whether a disk, or any of its children, is LVM-based.

  Args:
    disk: ganeti.objects.Disk object

  Returns:
    boolean indicating whether a LD_LV dev_type was found or not

  """
  # recurse into the children first: one lvm-based child is enough
  for child in (disk.children or []):
    if _RecursiveCheckIfLVMBased(child):
      return True
  return disk.dev_type == constants.LD_LV
1072 8084f9f6 Manuel Franceschini
1073 8084f9f6 Manuel Franceschini
1074 8084f9f6 Manuel Franceschini
class LUSetClusterParams(LogicalUnit):
  """Change the parameters of the cluster.

  """
  HPATH = "cluster-modify"
  HTYPE = constants.HTYPE_CLUSTER
  _OP_REQP = []

  def BuildHooksEnv(self):
    """Build hooks env.

    """
    master = self.sstore.GetMasterNode()
    env = {
      "OP_TARGET": self.sstore.GetClusterName(),
      "NEW_VG_NAME": self.op.vg_name,
      }
    return env, [master], [master]

  def CheckPrereq(self):
    """Check prerequisites.

    This checks whether the given params don't conflict and
    if the given volume group is valid.

    """
    if not self.op.vg_name:
      # disabling lvm storage is only possible if no instance uses it
      for iname in self.cfg.GetInstanceList():
        inst = self.cfg.GetInstanceInfo(iname)
        for disk in inst.disks:
          if _RecursiveCheckIfLVMBased(disk):
            raise errors.OpPrereqError("Cannot disable lvm storage while"
                                       " lvm-based instances exist")
    else:
      # a volume group name was given: check it on every cluster node
      node_list = self.cfg.GetNodeList()
      vglist = rpc.call_vg_list(node_list)
      for node in node_list:
        vgstatus = utils.CheckVolumeGroupSize(vglist[node], self.op.vg_name,
                                              constants.MIN_VG_SIZE)
        if vgstatus:
          raise errors.OpPrereqError("Error on node '%s': %s" %
                                     (node, vgstatus))

  def Exec(self, feedback_fn):
    """Change the parameters of the cluster.

    """
    new_vg = self.op.vg_name
    if new_vg == self.cfg.GetVGName():
      feedback_fn("Cluster LVM configuration already in desired"
                  " state, not changing")
    else:
      self.cfg.SetVGName(new_vg)
1129 8084f9f6 Manuel Franceschini
1130 8084f9f6 Manuel Franceschini
1131 5bfac263 Iustin Pop
def _WaitForSync(cfgw, instance, proc, oneshot=False, unlock=False):
  """Sleep and poll for an instance's disk to sync.

  Args:
    cfgw: configuration object, used to set the disks' physical IDs
    instance: the instance whose disks are polled on its primary node
    proc: object providing LogInfo/LogWarning for user feedback
    oneshot: if True, poll and report only once instead of waiting
      until all disks have finished syncing
    unlock: accepted but not referenced in this function body

  Returns:
    boolean: True if no disk was left degraded without progress
    information, False otherwise

  Raises:
    errors.RemoteError: if the mirror-status query fails ten times
      in a row

  """
  # nothing to wait for on a diskless instance
  if not instance.disks:
    return True

  if not oneshot:
    proc.LogInfo("Waiting for instance %s to sync disks." % instance.name)

  node = instance.primary_node

  # the physical IDs must be set for the node we are about to query
  for dev in instance.disks:
    cfgw.SetDiskID(dev, node)

  retries = 0
  while True:
    max_time = 0
    done = True
    cumul_degraded = False
    rstats = rpc.call_blockdev_getmirrorstatus(node, instance.disks)
    if not rstats:
      # transient RPC failure: retry up to 10 times, 6 seconds apart
      proc.LogWarning("Can't get any data from node %s" % node)
      retries += 1
      if retries >= 10:
        raise errors.RemoteError("Can't contact node %s for mirror data,"
                                 " aborting." % node)
      time.sleep(6)
      continue
    # any successful query resets the consecutive-failure counter
    retries = 0
    for i in range(len(rstats)):
      mstat = rstats[i]
      if mstat is None:
        # no status for this particular disk; warn but keep going
        proc.LogWarning("Can't compute data for node %s/%s" %
                        (node, instance.disks[i].iv_name))
        continue
      # we ignore the ldisk parameter
      perc_done, est_time, is_degraded, _ = mstat
      # degraded with no completion percentage counts as a terminal
      # failure for the overall result
      cumul_degraded = cumul_degraded or (is_degraded and perc_done is None)
      if perc_done is not None:
        # a non-None percentage means this disk is still syncing
        done = False
        if est_time is not None:
          rem_time = "%d estimated seconds remaining" % est_time
          max_time = est_time
        else:
          rem_time = "no time estimate"
        proc.LogInfo("- device %s: %5.2f%% done, %s" %
                     (instance.disks[i].iv_name, perc_done, rem_time))
    if done or oneshot:
      break

    # sleep until the estimated completion, but at most one minute
    time.sleep(min(60, max_time))

  if done:
    proc.LogInfo("Instance %s's disks are in sync." % instance.name)
  return not cumul_degraded
1187 a8083063 Iustin Pop
1188 a8083063 Iustin Pop
1189 0834c866 Iustin Pop
def _CheckDiskConsistency(cfgw, dev, node, on_primary, ldisk=False):
1190 a8083063 Iustin Pop
  """Check that mirrors are not degraded.
1191 a8083063 Iustin Pop

1192 0834c866 Iustin Pop
  The ldisk parameter, if True, will change the test from the
1193 0834c866 Iustin Pop
  is_degraded attribute (which represents overall non-ok status for
1194 0834c866 Iustin Pop
  the device(s)) to the ldisk (representing the local storage status).
1195 0834c866 Iustin Pop

1196 a8083063 Iustin Pop
  """
1197 a8083063 Iustin Pop
  cfgw.SetDiskID(dev, node)
1198 0834c866 Iustin Pop
  if ldisk:
1199 0834c866 Iustin Pop
    idx = 6
1200 0834c866 Iustin Pop
  else:
1201 0834c866 Iustin Pop
    idx = 5
1202 a8083063 Iustin Pop
1203 a8083063 Iustin Pop
  result = True
1204 a8083063 Iustin Pop
  if on_primary or dev.AssembleOnSecondary():
1205 a8083063 Iustin Pop
    rstats = rpc.call_blockdev_find(node, dev)
1206 a8083063 Iustin Pop
    if not rstats:
1207 aa9d0c32 Guido Trotter
      logger.ToStderr("Node %s: Disk degraded, not found or node down" % node)
1208 a8083063 Iustin Pop
      result = False
1209 a8083063 Iustin Pop
    else:
1210 0834c866 Iustin Pop
      result = result and (not rstats[idx])
1211 a8083063 Iustin Pop
  if dev.children:
1212 a8083063 Iustin Pop
    for child in dev.children:
1213 a8083063 Iustin Pop
      result = result and _CheckDiskConsistency(cfgw, child, node, on_primary)
1214 a8083063 Iustin Pop
1215 a8083063 Iustin Pop
  return result
1216 a8083063 Iustin Pop
1217 a8083063 Iustin Pop
1218 a8083063 Iustin Pop
class LUDiagnoseOS(NoHooksLU):
  """Logical unit for OS diagnose/query.

  """
  _OP_REQP = ["output_fields", "names"]
  REQ_BGL = False

  def ExpandNames(self):
    """Validate the requested fields and compute the lock set."""
    if self.op.names:
      raise errors.OpPrereqError("Selective OS query not supported")

    self.dynamic_fields = frozenset(["name", "valid", "node_status"])
    _CheckOutputFields(static=[],
                       dynamic=self.dynamic_fields,
                       selected=self.op.output_fields)

    # Lock all nodes, in shared mode
    self.needed_locks = {}
    self.share_locks[locking.LEVEL_NODE] = 1
    self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET

  def CheckPrereq(self):
    """Check prerequisites.

    """

  @staticmethod
  def _DiagnoseByOS(node_list, rlist):
    """Remaps a per-node return list into an a per-os per-node dictionary

      Args:
        node_list: a list with the names of all nodes
        rlist: a map with node names as keys and OS objects as values

      Returns:
        map: a map with osnames as keys and as value another map, with
             nodes as
             keys and list of OS objects as values
             e.g. {"debian-etch": {"node1": [<object>,...],
                                   "node2": [<object>,]}
                  }

    """
    all_os = {}
    for node_name, os_list in rlist.iteritems():
      if not os_list:
        continue
      for os_obj in os_list:
        if os_obj.name not in all_os:
          # first time this OS is seen: pre-seed an empty list for
          # every known node, so nodes missing the OS show up too
          all_os[os_obj.name] = dict([(nname, []) for nname in node_list])
        all_os[os_obj.name][node_name].append(os_obj)
    return all_os

  def Exec(self, feedback_fn):
    """Compute the list of OSes.

    """
    node_list = self.acquired_locks[locking.LEVEL_NODE]
    node_data = rpc.call_os_diagnose(node_list)
    if node_data == False:
      raise errors.OpExecError("Can't gather the list of OSes")
    per_os = self._DiagnoseByOS(node_list, node_data)
    output = []
    for os_name, os_data in per_os.iteritems():
      row = []
      for field in self.op.output_fields:
        if field == "name":
          val = os_name
        elif field == "valid":
          # an empty per-node list (or a falsy first entry) makes the
          # OS invalid
          val = utils.all([osl and osl[0] for osl in os_data.values()])
        elif field == "node_status":
          val = {}
          for nname, nos_list in os_data.iteritems():
            val[nname] = [(entry.status, entry.path) for entry in nos_list]
        else:
          raise errors.ParameterError(field)
        row.append(val)
      output.append(row)

    return output
1302 a8083063 Iustin Pop
1303 a8083063 Iustin Pop
1304 a8083063 Iustin Pop
class LURemoveNode(LogicalUnit):
  """Logical unit for removing a node.

  """
  HPATH = "node-remove"
  HTYPE = constants.HTYPE_NODE
  _OP_REQP = ["node_name"]

  def BuildHooksEnv(self):
    """Build hooks env.

    This doesn't run on the target node in the pre phase as a failed
    node would then be impossible to remove.

    """
    env = {
      "OP_TARGET": self.op.node_name,
      "NODE_NAME": self.op.node_name,
      }
    all_nodes = self.cfg.GetNodeList()
    all_nodes.remove(self.op.node_name)
    return env, all_nodes, all_nodes

  def CheckPrereq(self):
    """Check prerequisites.

    This checks:
     - the node exists in the configuration
     - it does not have primary or secondary instances
     - it's not the master

    Any errors are signalled by raising errors.OpPrereqError.

    """
    node = self.cfg.GetNodeInfo(self.cfg.ExpandNodeName(self.op.node_name))
    if node is None:
      # use the call form of raise; the old "raise cls, (msg)" statement
      # form is deprecated and inconsistent with the rest of this module
      raise errors.OpPrereqError("Node '%s' is unknown." % self.op.node_name)

    instance_list = self.cfg.GetInstanceList()

    masternode = self.sstore.GetMasterNode()
    if node.name == masternode:
      raise errors.OpPrereqError("Node is the master node,"
                                 " you need to failover first.")

    for instance_name in instance_list:
      instance = self.cfg.GetInstanceInfo(instance_name)
      if node.name == instance.primary_node:
        raise errors.OpPrereqError("Instance %s still running on the node,"
                                   " please remove first." % instance_name)
      if node.name in instance.secondary_nodes:
        raise errors.OpPrereqError("Instance %s has node as a secondary,"
                                   " please remove first." % instance_name)
    self.op.node_name = node.name
    self.node = node

  def Exec(self, feedback_fn):
    """Removes the node from the cluster.

    """
    node = self.node
    logger.Info("stopping the node daemon and removing configs from node %s" %
                node.name)

    self.context.RemoveNode(node.name)

    rpc.call_node_leave_cluster(node.name)
1371 c8a0948f Michael Hanselmann
1372 a8083063 Iustin Pop
1373 a8083063 Iustin Pop
class LUQueryNodes(NoHooksLU):
  """Logical unit for querying nodes.

  """
  _OP_REQP = ["output_fields", "names"]
  REQ_BGL = False

  def ExpandNames(self):
    """Validate the requested fields and compute the needed locks."""
    # fields that require a live RPC query to the nodes, as opposed to
    # static data taken from the configuration
    self.dynamic_fields = frozenset([
      "dtotal", "dfree",
      "mtotal", "mnode", "mfree",
      "bootid",
      "ctotal",
      ])

    _CheckOutputFields(static=["name", "pinst_cnt", "sinst_cnt",
                               "pinst_list", "sinst_list",
                               "pip", "sip", "tags"],
                       dynamic=self.dynamic_fields,
                       selected=self.op.output_fields)

    self.needed_locks = {}
    self.share_locks[locking.LEVEL_NODE] = 1
    # TODO: we could lock nodes only if the user asked for dynamic fields. For
    # that we need atomic ways to get info for a group of nodes from the
    # config, though.
    if not self.op.names:
      self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
    else:
      self.needed_locks[locking.LEVEL_NODE] = \
        _GetWantedNodes(self, self.op.names)

  def CheckPrereq(self):
    """Check prerequisites.

    """
    # This of course is valid only if we locked the nodes
    self.wanted = self.acquired_locks[locking.LEVEL_NODE]

  def Exec(self, feedback_fn):
    """Computes the list of nodes and their attributes.

    """
    nodenames = self.wanted
    nodelist = [self.cfg.GetNodeInfo(name) for name in nodenames]

    # begin data gathering

    # live data is only collected if at least one dynamic field was
    # requested; otherwise every node gets an empty live-data dict
    if self.dynamic_fields.intersection(self.op.output_fields):
      live_data = {}
      node_data = rpc.call_node_info(nodenames, self.cfg.GetVGName())
      for name in nodenames:
        nodeinfo = node_data.get(name, None)
        if nodeinfo:
          live_data[name] = {
            "mtotal": utils.TryConvert(int, nodeinfo['memory_total']),
            "mnode": utils.TryConvert(int, nodeinfo['memory_dom0']),
            "mfree": utils.TryConvert(int, nodeinfo['memory_free']),
            "dtotal": utils.TryConvert(int, nodeinfo['vg_size']),
            "dfree": utils.TryConvert(int, nodeinfo['vg_free']),
            "ctotal": utils.TryConvert(int, nodeinfo['cpu_total']),
            "bootid": nodeinfo['bootid'],
            }
        else:
          # node did not answer: leave its live data empty
          live_data[name] = {}
    else:
      live_data = dict.fromkeys(nodenames, {})

    # per-node sets of primary/secondary instance names
    node_to_primary = dict([(name, set()) for name in nodenames])
    node_to_secondary = dict([(name, set()) for name in nodenames])

    inst_fields = frozenset(("pinst_cnt", "pinst_list",
                             "sinst_cnt", "sinst_list"))
    # the instance list is only walked if instance-related fields were
    # actually requested
    if inst_fields & frozenset(self.op.output_fields):
      instancelist = self.cfg.GetInstanceList()

      for instance_name in instancelist:
        inst = self.cfg.GetInstanceInfo(instance_name)
        if inst.primary_node in node_to_primary:
          node_to_primary[inst.primary_node].add(inst.name)
        for secnode in inst.secondary_nodes:
          if secnode in node_to_secondary:
            node_to_secondary[secnode].add(inst.name)

    # end data gathering

    # build one output row per node, with columns in the order the
    # caller requested them
    output = []
    for node in nodelist:
      node_output = []
      for field in self.op.output_fields:
        if field == "name":
          val = node.name
        elif field == "pinst_list":
          val = list(node_to_primary[node.name])
        elif field == "sinst_list":
          val = list(node_to_secondary[node.name])
        elif field == "pinst_cnt":
          val = len(node_to_primary[node.name])
        elif field == "sinst_cnt":
          val = len(node_to_secondary[node.name])
        elif field == "pip":
          val = node.primary_ip
        elif field == "sip":
          val = node.secondary_ip
        elif field == "tags":
          val = list(node.GetTags())
        elif field in self.dynamic_fields:
          # missing live values are reported as None
          val = live_data[node.name].get(field, None)
        else:
          raise errors.ParameterError(field)
        node_output.append(val)
      output.append(node_output)

    return output
1487 a8083063 Iustin Pop
1488 a8083063 Iustin Pop
1489 dcb93971 Michael Hanselmann
class LUQueryNodeVolumes(NoHooksLU):
  """Logical unit for getting volumes on node(s).

  """
  _OP_REQP = ["nodes", "output_fields"]
  REQ_BGL = False

  def ExpandNames(self):
    """Validate the requested fields and compute the lock set."""
    _CheckOutputFields(static=["node"],
                       dynamic=["phys", "vg", "name", "size", "instance"],
                       selected=self.op.output_fields)

    self.needed_locks = {}
    self.share_locks[locking.LEVEL_NODE] = 1
    if self.op.nodes:
      self.needed_locks[locking.LEVEL_NODE] = \
        _GetWantedNodes(self, self.op.nodes)
    else:
      # no explicit node list given: operate on (and lock) all nodes
      self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the fields required are valid output fields.

    """
    self.nodes = self.acquired_locks[locking.LEVEL_NODE]

  def Exec(self, feedback_fn):
    """Computes the list of nodes and their attributes.

    """
    nodenames = self.nodes
    volumes = rpc.call_node_volumes(nodenames)

    instances = [self.cfg.GetInstanceInfo(iname)
                 for iname in self.cfg.GetInstanceList()]
    lv_by_node = dict([(inst, inst.MapLVsByNode()) for inst in instances])

    output = []
    for node in nodenames:
      # skip nodes that did not answer or reported no volumes
      node_vols = volumes.get(node, None)
      if not node_vols:
        continue

      # work on a sorted copy, leaving the RPC result untouched
      node_vols = sorted(node_vols, key=lambda vol: vol['dev'])

      for vol in node_vols:
        row = []
        for field in self.op.output_fields:
          if field == "node":
            val = node
          elif field == "phys":
            val = vol['dev']
          elif field == "vg":
            val = vol['vg']
          elif field == "name":
            val = vol['name']
          elif field == "size":
            val = int(float(vol['size']))
          elif field == "instance":
            # find the instance owning this LV on this node, if any
            val = '-'
            for inst in instances:
              inst_lvs = lv_by_node[inst]
              if node in inst_lvs and vol['name'] in inst_lvs[node]:
                val = inst.name
                break
          else:
            raise errors.ParameterError(field)
          row.append(str(val))

        output.append(row)

    return output
1566 dcb93971 Michael Hanselmann
1567 dcb93971 Michael Hanselmann
1568 a8083063 Iustin Pop
class LUAddNode(LogicalUnit):
  """Logical unit for adding node to the cluster.

  """
  HPATH = "node-add"
  HTYPE = constants.HTYPE_NODE
  _OP_REQP = ["node_name"]

  def BuildHooksEnv(self):
    """Build hooks env.

    This will run on all nodes before, and on all nodes + the new node after.

    """
    env = {
      "OP_TARGET": self.op.node_name,
      "NODE_NAME": self.op.node_name,
      "NODE_PIP": self.op.primary_ip,
      "NODE_SIP": self.op.secondary_ip,
      }
    # pre-hooks run on the current node list; post-hooks also run on the
    # node being added
    nodes_0 = self.cfg.GetNodeList()
    nodes_1 = nodes_0 + [self.op.node_name, ]
    return env, nodes_0, nodes_1

  def CheckPrereq(self):
    """Check prerequisites.

    This checks:
     - the new node is not already in the config (unless this is a readd)
     - it is resolvable
     - its parameters (single/dual homed) matches the cluster

    Any errors are signalled by raising errors.OpPrereqError.

    """
    node_name = self.op.node_name
    cfg = self.cfg

    # resolve the node name; presumably HostInfo raises on resolution
    # failure, aborting the prereq check -- TODO confirm
    dns_data = utils.HostInfo(node_name)

    node = dns_data.name
    primary_ip = self.op.primary_ip = dns_data.ip
    secondary_ip = getattr(self.op, "secondary_ip", None)
    if secondary_ip is None:
      # no separate replication network given: single-homed setup
      secondary_ip = primary_ip
    if not utils.IsValidIP(secondary_ip):
      raise errors.OpPrereqError("Invalid secondary IP given")
    self.op.secondary_ip = secondary_ip

    node_list = cfg.GetNodeList()
    # a plain add requires the node to be absent from the configuration,
    # a readd requires it to be present
    if not self.op.readd and node in node_list:
      raise errors.OpPrereqError("Node %s is already in the configuration" %
                                 node)
    elif self.op.readd and node not in node_list:
      raise errors.OpPrereqError("Node %s is not in the configuration" % node)

    for existing_node_name in node_list:
      existing_node = cfg.GetNodeInfo(existing_node_name)

      if self.op.readd and node == existing_node_name:
        # on readd, the node's own entry is expected, but its IP setup
        # must be unchanged from the stored configuration
        if (existing_node.primary_ip != primary_ip or
            existing_node.secondary_ip != secondary_ip):
          raise errors.OpPrereqError("Readded node doesn't have the same IP"
                                     " address configuration as before")
        continue

      # neither of the new node's IPs may clash with any existing node's
      if (existing_node.primary_ip == primary_ip or
          existing_node.secondary_ip == primary_ip or
          existing_node.primary_ip == secondary_ip or
          existing_node.secondary_ip == secondary_ip):
        raise errors.OpPrereqError("New node ip address(es) conflict with"
                                   " existing node %s" % existing_node.name)

    # check that the type of the node (single versus dual homed) is the
    # same as for the master
    myself = cfg.GetNodeInfo(self.sstore.GetMasterNode())
    master_singlehomed = myself.secondary_ip == myself.primary_ip
    newbie_singlehomed = secondary_ip == primary_ip
    if master_singlehomed != newbie_singlehomed:
      if master_singlehomed:
        raise errors.OpPrereqError("The master has no private ip but the"
                                   " new node has one")
      else:
        raise errors.OpPrereqError("The master has a private ip but the"
                                   " new node doesn't have one")

    # checks reachablity
    if not utils.TcpPing(primary_ip, constants.DEFAULT_NODED_PORT):
      raise errors.OpPrereqError("Node not reachable by ping")

    if not newbie_singlehomed:
      # check reachability from my secondary ip to newbie's secondary ip
      if not utils.TcpPing(secondary_ip, constants.DEFAULT_NODED_PORT,
                           source=myself.secondary_ip):
        raise errors.OpPrereqError("Node secondary ip not reachable by TCP"
                                   " based ping to noded port")

    # stash the node object for Exec; it is not yet added to the config
    self.new_node = objects.Node(name=node,
                                 primary_ip=primary_ip,
                                 secondary_ip=secondary_ip)

  def Exec(self, feedback_fn):
    """Adds the new node to the cluster.

    """
    new_node = self.new_node
    node = new_node.name

    # check connectivity
    result = rpc.call_version([node])[node]
    if result:
      if constants.PROTOCOL_VERSION == result:
        logger.Info("communication to node %s fine, sw version %s match" %
                    (node, result))
      else:
        raise errors.OpExecError("Version mismatch master version %s,"
                                 " node version %s" %
                                 (constants.PROTOCOL_VERSION, result))
    else:
      raise errors.OpExecError("Cannot get version from the new node")

    # setup ssh on node
    logger.Info("copy ssh key to node %s" % node)
    priv_key, pub_key, _ = ssh.GetUserFiles(constants.GANETI_RUNAS)
    keyarray = []
    # the host keys (DSA and RSA pairs) plus the cluster user's key pair
    keyfiles = [constants.SSH_HOST_DSA_PRIV, constants.SSH_HOST_DSA_PUB,
                constants.SSH_HOST_RSA_PRIV, constants.SSH_HOST_RSA_PUB,
                priv_key, pub_key]

    for i in keyfiles:
      f = open(i, 'r')
      try:
        keyarray.append(f.read())
      finally:
        f.close()

    result = rpc.call_node_add(node, keyarray[0], keyarray[1], keyarray[2],
                               keyarray[3], keyarray[4], keyarray[5])

    if not result:
      raise errors.OpExecError("Cannot transfer ssh keys to the new node")

    # Add node to our /etc/hosts, and add key to known_hosts
    utils.AddHostToEtcHosts(new_node.name)

    if new_node.secondary_ip != new_node.primary_ip:
      # dual-homed node: make it confirm it actually owns the given
      # secondary IP by pinging it from localhost on the node itself
      if not rpc.call_node_tcp_ping(new_node.name,
                                    constants.LOCALHOST_IP_ADDRESS,
                                    new_node.secondary_ip,
                                    constants.DEFAULT_NODED_PORT,
                                    10, False):
        raise errors.OpExecError("Node claims it doesn't have the secondary ip"
                                 " you gave (%s). Please fix and re-run this"
                                 " command." % new_node.secondary_ip)

    # have the master verify its ssh/hostname setup towards the new node
    node_verify_list = [self.sstore.GetMasterNode()]
    node_verify_param = {
      'nodelist': [node],
      # TODO: do a node-net-test as well?
    }

    result = rpc.call_node_verify(node_verify_list, node_verify_param)
    for verifier in node_verify_list:
      if not result[verifier]:
        raise errors.OpExecError("Cannot communicate with %s's node daemon"
                                 " for remote verification" % verifier)
      if result[verifier]['nodelist']:
        for failed in result[verifier]['nodelist']:
          feedback_fn("ssh/hostname verification failed %s -> %s" %
                      (verifier, result[verifier]['nodelist'][failed]))
        raise errors.OpExecError("ssh/hostname verification failed.")

    # Distribute updated /etc/hosts and known_hosts to all nodes,
    # including the node just added
    myself = self.cfg.GetNodeInfo(self.sstore.GetMasterNode())
    dist_nodes = self.cfg.GetNodeList()
    if not self.op.readd:
      # on readd the node is already part of the configured node list
      dist_nodes.append(node)
    if myself.name in dist_nodes:
      # the master already has the up-to-date files
      dist_nodes.remove(myself.name)

    logger.Debug("Copying hosts and known_hosts to all nodes")
    for fname in (constants.ETC_HOSTS, constants.SSH_KNOWN_HOSTS_FILE):
      result = rpc.call_upload_file(dist_nodes, fname)
      for to_node in dist_nodes:
        if not result[to_node]:
          # best-effort: a failed copy is logged but does not abort the add
          logger.Error("copy of file %s to node %s failed" %
                       (fname, to_node))

    # copy the ssconf file list (plus the VNC password file on HVM
    # clusters) to the new node only
    to_copy = self.sstore.GetFileList()
    if self.sstore.GetHypervisorType() == constants.HT_XEN_HVM31:
      to_copy.append(constants.VNC_PASSWORD_FILE)
    for fname in to_copy:
      result = rpc.call_upload_file([node], fname)
      if not result[node]:
        logger.Error("could not copy file %s to node %s" % (fname, node))

    # finally commit the new/readded node to the cluster context
    if self.op.readd:
      self.context.ReaddNode(new_node)
    else:
      self.context.AddNode(new_node)
1769 a8083063 Iustin Pop
1770 a8083063 Iustin Pop
1771 a8083063 Iustin Pop
class LUQueryClusterInfo(NoHooksLU):
  """Query cluster configuration.

  """
  _OP_REQP = []
  REQ_MASTER = False
  REQ_BGL = False

  def ExpandNames(self):
    # read-only query of static data: no locks required
    self.needed_locks = {}

  def CheckPrereq(self):
    """No prerequisites needed for this LU.

    """
    pass

  def Exec(self, feedback_fn):
    """Return the cluster configuration as a dict.

    """
    sstore = self.sstore
    arch = (platform.architecture()[0], platform.machine())
    return {
      "name": sstore.GetClusterName(),
      "software_version": constants.RELEASE_VERSION,
      "protocol_version": constants.PROTOCOL_VERSION,
      "config_version": constants.CONFIG_VERSION,
      "os_api_version": constants.OS_API_VERSION,
      "export_version": constants.EXPORT_VERSION,
      "master": sstore.GetMasterNode(),
      "architecture": arch,
      "hypervisor_type": sstore.GetHypervisorType(),
      }
1805 a8083063 Iustin Pop
1806 a8083063 Iustin Pop
1807 a8083063 Iustin Pop
class LUDumpClusterConfig(NoHooksLU):
  """Return a text-representation of the cluster-config.

  """
  _OP_REQP = []
  REQ_BGL = False

  def ExpandNames(self):
    # a read-only dump of the configuration needs no locks at all
    self.needed_locks = {}

  def CheckPrereq(self):
    """This LU has no prerequisites.

    """
    pass

  def Exec(self, feedback_fn):
    """Dump a representation of the cluster config to the standard output.

    """
    return self.cfg.DumpConfig()
1828 a8083063 Iustin Pop
1829 a8083063 Iustin Pop
1830 a8083063 Iustin Pop
class LUActivateInstanceDisks(NoHooksLU):
  """Bring up an instance's disks.

  """
  _OP_REQP = ["instance_name"]

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster.

    """
    expanded_name = self.cfg.ExpandInstanceName(self.op.instance_name)
    instance = self.cfg.GetInstanceInfo(expanded_name)
    if instance is None:
      raise errors.OpPrereqError("Instance '%s' not known" %
                                 self.op.instance_name)
    self.instance = instance


  def Exec(self, feedback_fn):
    """Activate the disks.

    """
    disks_ok, disks_info = _AssembleInstanceDisks(self.instance, self.cfg)
    if disks_ok:
      return disks_info
    raise errors.OpExecError("Cannot activate block devices")
1859 a8083063 Iustin Pop
1860 a8083063 Iustin Pop
1861 a8083063 Iustin Pop
def _AssembleInstanceDisks(instance, cfg, ignore_secondaries=False):
  """Prepare the block devices for an instance.

  This sets up the block devices on all nodes.

  Args:
    instance: a ganeti.objects.Instance object
    cfg: the cluster configuration, used to set the physical disk IDs
    ignore_secondaries: if true, errors on secondary nodes won't result
                        in an error return from the function

  Returns:
    false if the operation failed
    list of (host, instance_visible_name, node_visible_name) if the operation
         suceeded with the mapping from node devices to instance devices
  """
  device_info = []
  disks_ok = True
  iname = instance.name
  # With the two passes mechanism we try to reduce the window of
  # opportunity for the race condition of switching DRBD to primary
  # before handshaking occured, but we do not eliminate it

  # The proper fix would be to wait (with some limits) until the
  # connection has been made and drbd transitions from WFConnection
  # into any other network-connected state (Connected, SyncTarget,
  # SyncSource, etc.)

  # 1st pass, assemble on all nodes in secondary mode
  for inst_disk in instance.disks:
    for node, node_disk in inst_disk.ComputeNodeTree(instance.primary_node):
      cfg.SetDiskID(node_disk, node)
      result = rpc.call_blockdev_assemble(node, node_disk, iname, False)
      if not result:
        logger.Error("could not prepare block device %s on node %s"
                     " (is_primary=False, pass=1)" % (inst_disk.iv_name, node))
        # first-pass failures only matter when secondaries are not ignored
        if not ignore_secondaries:
          disks_ok = False

  # FIXME: race condition on drbd migration to primary

  # 2nd pass, do only the primary node
  for inst_disk in instance.disks:
    for node, node_disk in inst_disk.ComputeNodeTree(instance.primary_node):
      if node != instance.primary_node:
        continue
      cfg.SetDiskID(node_disk, node)
      result = rpc.call_blockdev_assemble(node, node_disk, iname, True)
      if not result:
        logger.Error("could not prepare block device %s on node %s"
                     " (is_primary=True, pass=2)" % (inst_disk.iv_name, node))
        # primary-node failures are fatal regardless of ignore_secondaries
        disks_ok = False
    # NOTE(review): 'result' here is whatever the last inner-loop
    # iteration left behind, i.e. the primary node's assemble result
    device_info.append((instance.primary_node, inst_disk.iv_name, result))

  # leave the disks configured for the primary node
  # this is a workaround that would be fixed better by
  # improving the logical/physical id handling
  for disk in instance.disks:
    cfg.SetDiskID(disk, instance.primary_node)

  return disks_ok, device_info
1921 a8083063 Iustin Pop
1922 a8083063 Iustin Pop
1923 fe7b0351 Michael Hanselmann
def _StartInstanceDisks(cfg, instance, force):
  """Start the disks of an instance, aborting on assembly failure.

  """
  disks_ok, _ = _AssembleInstanceDisks(instance, cfg,
                                       ignore_secondaries=force)
  if disks_ok:
    return
  # assembly failed: tear down whatever did come up, then bail out
  _ShutdownInstanceDisks(instance, cfg)
  if force is not None and not force:
    logger.Error("If the message above refers to a secondary node,"
                 " you can retry the operation using '--force'.")
  raise errors.OpExecError("Disk consistency error")
1935 fe7b0351 Michael Hanselmann
1936 fe7b0351 Michael Hanselmann
1937 a8083063 Iustin Pop
class LUDeactivateInstanceDisks(NoHooksLU):
  """Shutdown an instance's disks.

  """
  _OP_REQP = ["instance_name"]

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster.

    """
    expanded_name = self.cfg.ExpandInstanceName(self.op.instance_name)
    instance = self.cfg.GetInstanceInfo(expanded_name)
    if instance is None:
      raise errors.OpPrereqError("Instance '%s' not known" %
                                 self.op.instance_name)
    self.instance = instance

  def Exec(self, feedback_fn):
    """Deactivate the disks

    """
    # delegate to the safe variant, which refuses to act if the
    # instance is still running on its primary node
    _SafeShutdownInstanceDisks(self.instance, self.cfg)
1962 a8083063 Iustin Pop
1963 a8083063 Iustin Pop
1964 155d6c75 Guido Trotter
def _SafeShutdownInstanceDisks(instance, cfg):
  """Shutdown block devices of an instance.

  This function checks if an instance is running, before calling
  _ShutdownInstanceDisks.

  """
  # ask the primary node for its list of running instances; a non-list
  # answer means the RPC itself failed
  ins_l = rpc.call_instance_list([instance.primary_node])
  ins_l = ins_l[instance.primary_node]
  # use isinstance instead of the non-idiomatic "not type(x) is list"
  if not isinstance(ins_l, list):
    raise errors.OpExecError("Can't contact node '%s'" %
                             instance.primary_node)

  if instance.name in ins_l:
    raise errors.OpExecError("Instance is running, can't shutdown"
                             " block devices.")

  _ShutdownInstanceDisks(instance, cfg)
1982 a8083063 Iustin Pop
1983 a8083063 Iustin Pop
1984 a8083063 Iustin Pop
def _ShutdownInstanceDisks(instance, cfg, ignore_primary=False):
  """Shutdown block devices of an instance.

  This does the shutdown on all nodes of the instance.

  A failed shutdown on the primary node makes the return value False
  only when ignore_primary is not set; failures on other nodes always
  do.

  """
  all_ok = True
  primary = instance.primary_node
  for disk in instance.disks:
    for node, top_disk in disk.ComputeNodeTree(primary):
      cfg.SetDiskID(top_disk, node)
      if rpc.call_blockdev_shutdown(node, top_disk):
        continue
      logger.Error("could not shutdown block device %s on node %s" %
                   (disk.iv_name, node))
      if node != primary or not ignore_primary:
        all_ok = False
  return all_ok
2003 a8083063 Iustin Pop
2004 a8083063 Iustin Pop
2005 d4f16fd9 Iustin Pop
def _CheckNodeFreeMemory(cfg, node, reason, requested):
  """Checks if a node has enough free memory.

  This function check if a given node has the needed amount of free
  memory. In case the node has less memory or we cannot get the
  information from the node, this function raise an OpPrereqError
  exception.

  Args:
    - cfg: a ConfigWriter instance
    - node: the node name
    - reason: string to use in the error message
    - requested: the amount of memory in MiB

  """
  info = rpc.call_node_info([node], cfg.GetVGName())
  if not (info and isinstance(info, dict)):
    raise errors.OpPrereqError("Could not contact node %s for resource"
                             " information" % (node,))

  avail = info[node].get('memory_free')
  if not isinstance(avail, int):
    # the node daemon could not determine its free memory
    raise errors.OpPrereqError("Can't compute free memory on node %s, result"
                             " was '%s'" % (node, avail))
  if requested > avail:
    raise errors.OpPrereqError("Not enough memory on node %s for %s:"
                             " needed %s MiB, available %s MiB" %
                             (node, reason, requested, avail))
2033 d4f16fd9 Iustin Pop
2034 d4f16fd9 Iustin Pop
2035 a8083063 Iustin Pop
class LUStartupInstance(LogicalUnit):
  """Starts an instance.

  """
  HPATH = "instance-start"
  HTYPE = constants.HTYPE_INSTANCE
  _OP_REQP = ["instance_name", "force"]
  REQ_BGL = False

  def ExpandNames(self):
    self._ExpandAndLockInstance()
    # node locks are filled in later, once the instance lock is held
    self.needed_locks[locking.LEVEL_NODE] = []
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE

  def DeclareLocks(self, level):
    if level == locking.LEVEL_NODE:
      self._LockInstancesNodes()

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and secondary nodes of the instance.

    """
    env = {"FORCE": self.op.force}
    env.update(_BuildInstanceHookEnvByObject(self.instance))
    nl = [self.sstore.GetMasterNode(), self.instance.primary_node]
    nl.extend(self.instance.secondary_nodes)
    return env, nl, nl

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster.

    """
    instance = self.cfg.GetInstanceInfo(self.op.instance_name)
    assert instance is not None, \
      "Cannot retrieve locked instance %s" % self.op.instance_name
    self.instance = instance

    # check bridges existance
    _CheckInstanceBridgesExist(instance)

    _CheckNodeFreeMemory(self.cfg, instance.primary_node,
                         "starting instance %s" % instance.name,
                         instance.memory)

  def Exec(self, feedback_fn):
    """Start the instance.

    """
    instance = self.instance
    force = self.op.force
    extra_args = getattr(self.op, "extra_args", "")

    # mark the instance as running in the configuration before actually
    # starting it
    self.cfg.MarkInstanceUp(instance.name)

    pnode = instance.primary_node

    _StartInstanceDisks(self.cfg, instance, force)

    if not rpc.call_instance_start(pnode, instance, extra_args):
      # the start failed: don't leave the disks assembled
      _ShutdownInstanceDisks(instance, self.cfg)
      raise errors.OpExecError("Could not start instance")
2101 a8083063 Iustin Pop
2102 a8083063 Iustin Pop
2103 bf6929a2 Alexander Schreiber
class LURebootInstance(LogicalUnit):
  """Reboot an instance.

  A soft/hard reboot is delegated to the primary node; a full reboot is
  implemented as shutdown + disk reactivation + start.

  """
  HPATH = "instance-reboot"
  HTYPE = constants.HTYPE_INSTANCE
  _OP_REQP = ["instance_name", "ignore_secondaries", "reboot_type"]
  REQ_BGL = False

  def ExpandNames(self):
    """Validate the reboot type and acquire the instance lock."""
    if self.op.reboot_type not in [constants.INSTANCE_REBOOT_SOFT,
                                   constants.INSTANCE_REBOOT_HARD,
                                   constants.INSTANCE_REBOOT_FULL]:
      raise errors.ParameterError("reboot type not in [%s, %s, %s]" %
                                  (constants.INSTANCE_REBOOT_SOFT,
                                   constants.INSTANCE_REBOOT_HARD,
                                   constants.INSTANCE_REBOOT_FULL))
    self._ExpandAndLockInstance()
    self.needed_locks[locking.LEVEL_NODE] = []
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE

  def DeclareLocks(self, level):
    """Declare the node locks needed for the chosen reboot type."""
    if level == locking.LEVEL_NODE:
      # Soft and hard reboots run entirely on the primary node; only a
      # full reboot (shutdown + disk cycle + start) also touches the
      # secondary nodes.  BUGFIX: the previous code computed
      # "not constants.INSTANCE_REBOOT_FULL", which is `not` of a
      # non-empty constant and therefore always False, so all nodes were
      # always locked regardless of the requested reboot type.
      primary_only = self.op.reboot_type != constants.INSTANCE_REBOOT_FULL
      self._LockInstancesNodes(primary_only=primary_only)

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and secondary nodes of the instance.

    """
    env = {
      "IGNORE_SECONDARIES": self.op.ignore_secondaries,
      }
    env.update(_BuildInstanceHookEnvByObject(self.instance))
    nl = ([self.sstore.GetMasterNode(), self.instance.primary_node] +
          list(self.instance.secondary_nodes))
    return env, nl, nl

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster.

    """
    self.instance = instance = self.cfg.GetInstanceInfo(self.op.instance_name)
    assert self.instance is not None, \
      "Cannot retrieve locked instance %s" % self.op.instance_name

    # check bridges existence on the primary node
    _CheckInstanceBridgesExist(instance)

  def Exec(self, feedback_fn):
    """Reboot the instance.

    """
    instance = self.instance
    ignore_secondaries = self.op.ignore_secondaries
    reboot_type = self.op.reboot_type
    # extra_args is optional on the opcode, hence the getattr default
    extra_args = getattr(self.op, "extra_args", "")

    node_current = instance.primary_node

    if reboot_type in [constants.INSTANCE_REBOOT_SOFT,
                       constants.INSTANCE_REBOOT_HARD]:
      # in-place reboot, handled entirely by the primary node
      if not rpc.call_instance_reboot(node_current, instance,
                                      reboot_type, extra_args):
        raise errors.OpExecError("Could not reboot instance")
    else:
      # full reboot: stop the instance, cycle its disks, start it again
      if not rpc.call_instance_shutdown(node_current, instance):
        raise errors.OpExecError("could not shutdown instance for full reboot")
      _ShutdownInstanceDisks(instance, self.cfg)
      _StartInstanceDisks(self.cfg, instance, ignore_secondaries)
      if not rpc.call_instance_start(node_current, instance, extra_args):
        # roll back the disk activation before reporting the failure
        _ShutdownInstanceDisks(instance, self.cfg)
        raise errors.OpExecError("Could not start instance for full reboot")

    self.cfg.MarkInstanceUp(instance.name)
2182 bf6929a2 Alexander Schreiber
2183 bf6929a2 Alexander Schreiber
2184 a8083063 Iustin Pop
class LUShutdownInstance(LogicalUnit):
  """Shutdown an instance.

  """
  HPATH = "instance-stop"
  HTYPE = constants.HTYPE_INSTANCE
  _OP_REQP = ["instance_name"]
  REQ_BGL = False

  def ExpandNames(self):
    """Acquire the instance lock; node locks are computed later."""
    self._ExpandAndLockInstance()
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
    self.needed_locks[locking.LEVEL_NODE] = []

  def DeclareLocks(self, level):
    """Lock the instance's nodes once the node level is reached."""
    if level != locking.LEVEL_NODE:
      return
    self._LockInstancesNodes()

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and secondary nodes of the instance.

    """
    inst = self.instance
    env = _BuildInstanceHookEnvByObject(inst)
    nl = [self.sstore.GetMasterNode(), inst.primary_node]
    nl.extend(inst.secondary_nodes)
    return env, nl, nl

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster.

    """
    self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
    assert self.instance is not None, \
      "Cannot retrieve locked instance %s" % self.op.instance_name

  def Exec(self, feedback_fn):
    """Shutdown the instance.

    """
    inst = self.instance
    pnode = inst.primary_node
    # record the new state first, then try to actually stop the instance
    self.cfg.MarkInstanceDown(inst.name)
    if not rpc.call_instance_shutdown(pnode, inst):
      logger.Error("could not shutdown instance")

    _ShutdownInstanceDisks(inst, self.cfg)
2234 a8083063 Iustin Pop
2235 a8083063 Iustin Pop
2236 fe7b0351 Michael Hanselmann
class LUReinstallInstance(LogicalUnit):
  """Reinstall an instance.

  Re-runs the OS creation scripts on a stopped instance, optionally
  switching it to a different OS first.

  """
  HPATH = "instance-reinstall"
  HTYPE = constants.HTYPE_INSTANCE
  _OP_REQP = ["instance_name"]
  REQ_BGL = False

  def ExpandNames(self):
    """Acquire the instance lock; node locks are computed later."""
    self._ExpandAndLockInstance()
    self.needed_locks[locking.LEVEL_NODE] = []
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE

  def DeclareLocks(self, level):
    """Lock the instance's nodes once the node level is reached."""
    if level == locking.LEVEL_NODE:
      self._LockInstancesNodes()

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and secondary nodes of the instance.

    """
    env = _BuildInstanceHookEnvByObject(self.instance)
    nl = ([self.sstore.GetMasterNode(), self.instance.primary_node] +
          list(self.instance.secondary_nodes))
    return env, nl, nl

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster and is not running.

    """
    instance = self.cfg.GetInstanceInfo(self.op.instance_name)
    assert instance is not None, \
      "Cannot retrieve locked instance %s" % self.op.instance_name

    if instance.disk_template == constants.DT_DISKLESS:
      raise errors.OpPrereqError("Instance '%s' has no disks" %
                                 self.op.instance_name)
    if instance.status != "down":
      raise errors.OpPrereqError("Instance '%s' is marked to be up" %
                                 self.op.instance_name)
    # also make sure it is not actually running on its primary node
    remote_info = rpc.call_instance_info(instance.primary_node, instance.name)
    if remote_info:
      raise errors.OpPrereqError("Instance '%s' is running on the node %s" %
                                 (self.op.instance_name,
                                  instance.primary_node))

    # os_type is optional on the opcode, hence the getattr default
    self.op.os_type = getattr(self.op, "os_type", None)
    if self.op.os_type is not None:
      # OS verification
      pnode = self.cfg.GetNodeInfo(
        self.cfg.ExpandNodeName(instance.primary_node))
      if pnode is None:
        # BUGFIX: the original message used self.op.pnode, an attribute the
        # reinstall opcode does not define, which raised AttributeError
        # instead of the intended OpPrereqError
        raise errors.OpPrereqError("Primary node '%s' is unknown" %
                                   instance.primary_node)
      os_obj = rpc.call_os_get(pnode.name, self.op.os_type)
      if not os_obj:
        raise errors.OpPrereqError("OS '%s' not in supported OS list for"
                                   " primary node"  % self.op.os_type)

    self.instance = instance

  def Exec(self, feedback_fn):
    """Reinstall the instance.

    """
    inst = self.instance

    if self.op.os_type is not None:
      # switch the OS in the configuration before re-running the scripts
      feedback_fn("Changing OS to '%s'..." % self.op.os_type)
      inst.os = self.op.os_type
      self.cfg.AddInstance(inst)

    _StartInstanceDisks(self.cfg, inst, None)
    try:
      feedback_fn("Running the instance OS create scripts...")
      if not rpc.call_instance_os_add(inst.primary_node, inst, "sda", "sdb"):
        raise errors.OpExecError("Could not install OS for instance %s"
                                 " on node %s" %
                                 (inst.name, inst.primary_node))
    finally:
      # always deactivate the disks again, even on failure
      _ShutdownInstanceDisks(inst, self.cfg)
2322 fe7b0351 Michael Hanselmann
2323 fe7b0351 Michael Hanselmann
2324 decd5f45 Iustin Pop
class LURenameInstance(LogicalUnit):
  """Rename an instance.

  """
  HPATH = "instance-rename"
  HTYPE = constants.HTYPE_INSTANCE
  _OP_REQP = ["instance_name", "new_name"]

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and secondary nodes of the instance.

    """
    env = _BuildInstanceHookEnvByObject(self.instance)
    env["INSTANCE_NEW_NAME"] = self.op.new_name
    nl = ([self.sstore.GetMasterNode(), self.instance.primary_node] +
          list(self.instance.secondary_nodes))
    return env, nl, nl

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster and is not running,
    and that the new name resolves and is not already taken.

    """
    instance = self.cfg.GetInstanceInfo(
      self.cfg.ExpandInstanceName(self.op.instance_name))
    if instance is None:
      raise errors.OpPrereqError("Instance '%s' not known" %
                                 self.op.instance_name)
    if instance.status != "down":
      raise errors.OpPrereqError("Instance '%s' is marked to be up" %
                                 self.op.instance_name)
    # also make sure it is not actually running on its primary node
    remote_info = rpc.call_instance_info(instance.primary_node, instance.name)
    if remote_info:
      raise errors.OpPrereqError("Instance '%s' is running on the node %s" %
                                 (self.op.instance_name,
                                  instance.primary_node))
    self.instance = instance

    # new name verification (resolve it and use the canonical form)
    name_info = utils.HostInfo(self.op.new_name)

    self.op.new_name = new_name = name_info.name
    instance_list = self.cfg.GetInstanceList()
    if new_name in instance_list:
      raise errors.OpPrereqError("Instance '%s' is already in the cluster" %
                                 new_name)

    # unless told otherwise, refuse a new name whose IP is already live
    if not getattr(self.op, "ignore_ip", False):
      if utils.TcpPing(name_info.ip, constants.DEFAULT_NODED_PORT):
        raise errors.OpPrereqError("IP %s of instance %s already in use" %
                                   (name_info.ip, new_name))


  def Exec(self, feedback_fn):
    """Rename the instance.

    """
    inst = self.instance
    old_name = inst.name

    # for file-based disks the storage directory is derived from the name,
    # so remember the old path before the rename
    if inst.disk_template == constants.DT_FILE:
      old_file_storage_dir = os.path.dirname(inst.disks[0].logical_id[1])

    self.cfg.RenameInstance(inst.name, self.op.new_name)
    # Change the instance lock. This is definitely safe while we hold the BGL
    self.context.glm.remove(locking.LEVEL_INSTANCE, inst.name)
    self.context.glm.add(locking.LEVEL_INSTANCE, self.op.new_name)

    # re-read the instance from the configuration after rename
    inst = self.cfg.GetInstanceInfo(self.op.new_name)

    if inst.disk_template == constants.DT_FILE:
      # move the on-disk storage directory to match the new name
      new_file_storage_dir = os.path.dirname(inst.disks[0].logical_id[1])
      result = rpc.call_file_storage_dir_rename(inst.primary_node,
                                                old_file_storage_dir,
                                                new_file_storage_dir)

      if not result:
        raise errors.OpExecError("Could not connect to node '%s' to rename"
                                 " directory '%s' to '%s' (but the instance"
                                 " has been renamed in Ganeti)" % (
                                 inst.primary_node, old_file_storage_dir,
                                 new_file_storage_dir))

      if not result[0]:
        raise errors.OpExecError("Could not rename directory '%s' to '%s'"
                                 " (but the instance has been renamed in"
                                 " Ganeti)" % (old_file_storage_dir,
                                               new_file_storage_dir))

    _StartInstanceDisks(self.cfg, inst, None)
    try:
      # a failed OS rename script is logged but not fatal: the config
      # rename has already happened at this point
      if not rpc.call_instance_run_rename(inst.primary_node, inst, old_name,
                                          "sda", "sdb"):
        msg = ("Could not run OS rename script for instance %s on node %s"
               " (but the instance has been renamed in Ganeti)" %
               (inst.name, inst.primary_node))
        logger.Error(msg)
    finally:
      _ShutdownInstanceDisks(inst, self.cfg)
2427 decd5f45 Iustin Pop
2428 decd5f45 Iustin Pop
2429 a8083063 Iustin Pop
class LURemoveInstance(LogicalUnit):
  """Remove an instance.

  """
  HPATH = "instance-remove"
  HTYPE = constants.HTYPE_INSTANCE
  _OP_REQP = ["instance_name", "ignore_failures"]

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and secondary nodes of the instance.

    """
    env = _BuildInstanceHookEnvByObject(self.instance)
    nl = [self.sstore.GetMasterNode()]
    return env, nl, nl

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster.

    """
    expanded_name = self.cfg.ExpandInstanceName(self.op.instance_name)
    inst = self.cfg.GetInstanceInfo(expanded_name)
    if inst is None:
      raise errors.OpPrereqError("Instance '%s' not known" %
                                 self.op.instance_name)
    self.instance = inst

  def Exec(self, feedback_fn):
    """Remove the instance.

    """
    inst = self.instance
    logger.Info("shutting down instance %s on node %s" %
                (inst.name, inst.primary_node))

    if not rpc.call_instance_shutdown(inst.primary_node, inst):
      if not self.op.ignore_failures:
        raise errors.OpExecError("Could not shutdown instance %s on node %s" %
                                 (inst.name, inst.primary_node))
      feedback_fn("Warning: can't shutdown instance")

    logger.Info("removing block devices for instance %s" % inst.name)

    if not _RemoveDisks(inst, self.cfg):
      if not self.op.ignore_failures:
        raise errors.OpExecError("Can't remove instance's disks")
      feedback_fn("Warning: can't remove instance's disks")

    logger.Info("removing instance %s out of cluster config" % inst.name)

    self.cfg.RemoveInstance(inst.name)
    # drop the instance's entry from the Ganeti Lock Manager as well
    self.context.glm.remove(locking.LEVEL_INSTANCE, inst.name)
2488 a8083063 Iustin Pop
2489 a8083063 Iustin Pop
2490 a8083063 Iustin Pop
class LUQueryInstances(NoHooksLU):
  """Logical unit for querying instances.

  """
  _OP_REQP = ["output_fields", "names"]
  REQ_BGL = False

  def ExpandNames(self):
    # fields whose values require a live query to the nodes
    self.dynamic_fields = frozenset(["oper_state", "oper_ram", "status"])
    _CheckOutputFields(static=["name", "os", "pnode", "snodes",
                               "admin_state", "admin_ram",
                               "disk_template", "ip", "mac", "bridge",
                               "sda_size", "sdb_size", "vcpus", "tags",
                               "auto_balance",
                               "network_port", "kernel_path", "initrd_path",
                               "hvm_boot_order", "hvm_acpi", "hvm_pae",
                               "hvm_cdrom_image_path", "hvm_nic_type",
                               "hvm_disk_type", "vnc_bind_address"],
                       dynamic=self.dynamic_fields,
                       selected=self.op.output_fields)

    # queries only read, so shared locks are sufficient
    self.needed_locks = {}
    self.share_locks[locking.LEVEL_INSTANCE] = 1
    self.share_locks[locking.LEVEL_NODE] = 1

    # TODO: we could lock instances (and nodes) only if the user asked for
    # dynamic fields. For that we need atomic ways to get info for a group of
    # instances from the config, though.
    if not self.op.names:
      self.needed_locks[locking.LEVEL_INSTANCE] = locking.ALL_SET
    else:
      self.needed_locks[locking.LEVEL_INSTANCE] = \
        _GetWantedInstances(self, self.op.names)

    self.needed_locks[locking.LEVEL_NODE] = []
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE

  def DeclareLocks(self, level):
    # TODO: locking of nodes could be avoided when not querying them
    if level == locking.LEVEL_NODE:
      self._LockInstancesNodes()

  def CheckPrereq(self):
    """Check prerequisites.

    """
    # This of course is valid only if we locked the instances
    self.wanted = self.acquired_locks[locking.LEVEL_INSTANCE]

  def Exec(self, feedback_fn):
    """Computes the list of instances and their attributes.

    Returns one row (list of field values, in output_fields order) per
    wanted instance.

    """
    instance_names = self.wanted
    instance_list = [self.cfg.GetInstanceInfo(iname) for iname
                     in instance_names]

    # begin data gathering

    nodes = frozenset([inst.primary_node for inst in instance_list])

    # only query the nodes if a dynamic field was actually requested
    bad_nodes = []
    if self.dynamic_fields.intersection(self.op.output_fields):
      live_data = {}
      node_data = rpc.call_all_instances_info(nodes)
      for name in nodes:
        result = node_data[name]
        if result:
          live_data.update(result)
        elif result == False:
          # the RPC itself failed; remember the node as unreachable
          bad_nodes.append(name)
        # else no instance is alive
    else:
      live_data = dict([(name, {}) for name in instance_names])

    # end data gathering

    output = []
    for instance in instance_list:
      iout = []
      for field in self.op.output_fields:
        if field == "name":
          val = instance.name
        elif field == "os":
          val = instance.os
        elif field == "pnode":
          val = instance.primary_node
        elif field == "snodes":
          val = list(instance.secondary_nodes)
        elif field == "admin_state":
          # True when the instance is configured to be up
          val = (instance.status != "down")
        elif field == "oper_state":
          # live state; None when the primary node could not be queried
          if instance.primary_node in bad_nodes:
            val = None
          else:
            val = bool(live_data.get(instance.name))
        elif field == "status":
          # combined admin+oper state, flagging any mismatch as ERROR_*
          if instance.primary_node in bad_nodes:
            val = "ERROR_nodedown"
          else:
            running = bool(live_data.get(instance.name))
            if running:
              if instance.status != "down":
                val = "running"
              else:
                val = "ERROR_up"
            else:
              if instance.status != "down":
                val = "ERROR_down"
              else:
                val = "ADMIN_down"
        elif field == "admin_ram":
          val = instance.memory
        elif field == "oper_ram":
          if instance.primary_node in bad_nodes:
            val = None
          elif instance.name in live_data:
            val = live_data[instance.name].get("memory", "?")
          else:
            val = "-"
        elif field == "disk_template":
          val = instance.disk_template
        elif field == "ip":
          val = instance.nics[0].ip
        elif field == "bridge":
          val = instance.nics[0].bridge
        elif field == "mac":
          val = instance.nics[0].mac
        elif field == "sda_size" or field == "sdb_size":
          # field[:3] is the disk name ("sda"/"sdb")
          disk = instance.FindDisk(field[:3])
          if disk is None:
            val = None
          else:
            val = disk.size
        elif field == "vcpus":
          val = instance.vcpus
        elif field == "tags":
          val = list(instance.GetTags())
        elif field in ("network_port", "kernel_path", "initrd_path",
                       "hvm_boot_order", "hvm_acpi", "hvm_pae",
                       "hvm_cdrom_image_path", "hvm_nic_type",
                       "hvm_disk_type", "vnc_bind_address"):
          # hypervisor parameters: read straight off the instance object,
          # with per-field placeholders when unset
          val = getattr(instance, field, None)
          if val is not None:
            pass
          elif field in ("hvm_nic_type", "hvm_disk_type",
                         "kernel_path", "initrd_path"):
            val = "default"
          else:
            val = "-"
        else:
          raise errors.ParameterError(field)
        iout.append(val)
      output.append(iout)

    return output
2646 a8083063 Iustin Pop
2647 a8083063 Iustin Pop
2648 a8083063 Iustin Pop
class LUFailoverInstance(LogicalUnit):
  """Failover an instance.

  """
  HPATH = "instance-failover"
  HTYPE = constants.HTYPE_INSTANCE
  _OP_REQP = ["instance_name", "ignore_consistency"]
  REQ_BGL = False

  def ExpandNames(self):
    self._ExpandAndLockInstance()
    # node locks cannot be computed yet; they are filled in by
    # _LockInstancesNodes once the instance lock is held
    self.needed_locks[locking.LEVEL_NODE] = []
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE

  def DeclareLocks(self, level):
    if level != locking.LEVEL_NODE:
      return
    self._LockInstancesNodes()

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and secondary nodes of the instance.

    """
    env = {"IGNORE_CONSISTENCY": self.op.ignore_consistency}
    env.update(_BuildInstanceHookEnvByObject(self.instance))
    nodes = [self.sstore.GetMasterNode()] + list(self.instance.secondary_nodes)
    return env, nodes, nodes

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster.

    """
    instance = self.cfg.GetInstanceInfo(self.op.instance_name)
    self.instance = instance
    assert instance is not None, \
      "Cannot retrieve locked instance %s" % self.op.instance_name

    if instance.disk_template not in constants.DTS_NET_MIRROR:
      raise errors.OpPrereqError("Instance's disk layout is not"
                                 " network mirrored, cannot failover.")

    secondary_nodes = instance.secondary_nodes
    if not secondary_nodes:
      raise errors.ProgrammerError("no secondary node but using "
                                   "a mirrored disk template")
    target_node = secondary_nodes[0]

    # the target node must have enough free memory to host the instance
    _CheckNodeFreeMemory(self.cfg, target_node, "failing over instance %s" %
                         instance.name, instance.memory)

    # every bridge the instance's NICs use must exist on the target node
    bridges = [nic.bridge for nic in instance.nics]
    if not rpc.call_bridges_exist(target_node, bridges):
      raise errors.OpPrereqError("One or more target bridges %s does not"
                                 " exist on destination node '%s'" %
                                 (bridges, target_node))

  def Exec(self, feedback_fn):
    """Failover an instance.

    The failover is done by shutting it down on its present node and
    starting it on the secondary.

    """
    instance = self.instance
    source_node = instance.primary_node
    target_node = instance.secondary_nodes[0]

    feedback_fn("* checking disk consistency between source and target")
    # degraded disks only abort the failover for a running instance,
    # and only when consistency was not explicitly waived
    must_be_consistent = (instance.status == "up" and
                          not self.op.ignore_consistency)
    for dev in instance.disks:
      # for drbd, these are drbd over lvm
      if not _CheckDiskConsistency(self.cfg, dev, target_node, False):
        if must_be_consistent:
          raise errors.OpExecError("Disk %s is degraded on target node,"
                                   " aborting failover." % dev.iv_name)

    feedback_fn("* shutting down instance on source node")
    logger.Info("Shutting down instance %s on node %s" %
                (instance.name, source_node))

    if not rpc.call_instance_shutdown(source_node, instance):
      if not self.op.ignore_consistency:
        raise errors.OpExecError("Could not shutdown instance %s on node %s" %
                                 (instance.name, source_node))
      logger.Error("Could not shutdown instance %s on node %s. Proceeding"
                   " anyway. Please make sure node %s is down"  %
                   (instance.name, source_node, source_node))

    feedback_fn("* deactivating the instance's disks on source node")
    if not _ShutdownInstanceDisks(instance, self.cfg, ignore_primary=True):
      raise errors.OpExecError("Can't shut down the instance's disks.")

    # record the new primary and distribute the updated configuration
    instance.primary_node = target_node
    self.cfg.Update(instance)

    # Only start the instance if it's marked as up
    if instance.status != "up":
      return

    feedback_fn("* activating the instance's disks on target node")
    logger.Info("Starting instance %s on node %s" %
                (instance.name, target_node))

    disks_ok, _ = _AssembleInstanceDisks(instance, self.cfg,
                                         ignore_secondaries=True)
    if not disks_ok:
      _ShutdownInstanceDisks(instance, self.cfg)
      raise errors.OpExecError("Can't activate the instance's disks")

    feedback_fn("* starting the instance on the target node")
    if not rpc.call_instance_start(target_node, instance, None):
      _ShutdownInstanceDisks(instance, self.cfg)
      raise errors.OpExecError("Could not start instance %s on node %s." %
                               (instance.name, target_node))
def _CreateBlockDevOnPrimary(cfg, node, instance, device, info):
  """Create a tree of block devices on the primary node.

  This always creates all devices.

  Returns:
    True if the device and all its children were created, False on
    the first failure.

  """
  # depth-first: children must exist before the device on top of them
  for child in device.children or []:
    if not _CreateBlockDevOnPrimary(cfg, node, instance, child, info):
      return False

  cfg.SetDiskID(device, node)
  new_id = rpc.call_blockdev_create(node, device, device.size,
                                    instance.name, True, info)
  if not new_id:
    return False
  # remember the node-assigned physical id, unless one is already known
  if device.physical_id is None:
    device.physical_id = new_id
  return True
def _CreateBlockDevOnSecondary(cfg, node, instance, device, force, info):
  """Create a tree of block devices on a secondary node.

  If this device type has to be created on secondaries, create it and
  all its children.

  If not, just recurse to children keeping the same 'force' value.

  """
  # once any ancestor requires secondary creation, all descendants do too
  if device.CreateOnSecondary():
    force = True
  for child in device.children or []:
    if not _CreateBlockDevOnSecondary(cfg, node, instance,
                                      child, force, info):
      return False

  if not force:
    # this device itself does not need to exist on the secondary
    return True

  cfg.SetDiskID(device, node)
  new_id = rpc.call_blockdev_create(node, device, device.size,
                                    instance.name, False, info)
  if not new_id:
    return False
  # remember the node-assigned physical id, unless one is already known
  if device.physical_id is None:
    device.physical_id = new_id
  return True
def _GenerateUniqueNames(cfg, exts):
2821 923b1523 Iustin Pop
  """Generate a suitable LV name.
2822 923b1523 Iustin Pop

2823 923b1523 Iustin Pop
  This will generate a logical volume name for the given instance.
2824 923b1523 Iustin Pop

2825 923b1523 Iustin Pop
  """
2826 923b1523 Iustin Pop
  results = []
2827 923b1523 Iustin Pop
  for val in exts:
2828 923b1523 Iustin Pop
    new_id = cfg.GenerateUniqueID()
2829 923b1523 Iustin Pop
    results.append("%s%s" % (new_id, val))
2830 923b1523 Iustin Pop
  return results
2831 923b1523 Iustin Pop
2832 923b1523 Iustin Pop
2833 a1f445d3 Iustin Pop
def _GenerateDRBD8Branch(cfg, primary, secondary, size, names, iv_name):
  """Generate a drbd8 device complete with its children.

  """
  port = cfg.AllocatePort()
  vgname = cfg.GetVGName()
  # data LV sized as requested; metadata LV has a fixed 128MB size
  data_dev = objects.Disk(dev_type=constants.LD_LV, size=size,
                          logical_id=(vgname, names[0]))
  meta_dev = objects.Disk(dev_type=constants.LD_LV, size=128,
                          logical_id=(vgname, names[1]))
  return objects.Disk(dev_type=constants.LD_DRBD8, size=size,
                      logical_id=(primary, secondary, port),
                      children=[data_dev, meta_dev],
                      iv_name=iv_name)
def _GenerateDiskTemplate(cfg, template_name,
                          instance_name, primary_node,
                          secondary_nodes, disk_sz, swap_sz,
                          file_storage_dir, file_driver):
  """Generate the entire disk layout for a given template type.

  """
  #TODO: compute space requirements

  vgname = cfg.GetVGName()

  if template_name == constants.DT_DISKLESS:
    return []

  if template_name == constants.DT_PLAIN:
    # plain LVM volumes live only on the primary node
    if secondary_nodes:
      raise errors.ProgrammerError("Wrong template configuration")

    names = _GenerateUniqueNames(cfg, [".sda", ".sdb"])
    sda = objects.Disk(dev_type=constants.LD_LV, size=disk_sz,
                       logical_id=(vgname, names[0]),
                       iv_name="sda")
    sdb = objects.Disk(dev_type=constants.LD_LV, size=swap_sz,
                       logical_id=(vgname, names[1]),
                       iv_name="sdb")
    return [sda, sdb]

  if template_name == constants.DT_DRBD8:
    # drbd8 mirrors to exactly one secondary node
    if len(secondary_nodes) != 1:
      raise errors.ProgrammerError("Wrong template configuration")

    remote_node = secondary_nodes[0]
    names = _GenerateUniqueNames(cfg, [".sda_data", ".sda_meta",
                                       ".sdb_data", ".sdb_meta"])
    drbd_sda = _GenerateDRBD8Branch(cfg, primary_node, remote_node,
                                    disk_sz, names[0:2], "sda")
    drbd_sdb = _GenerateDRBD8Branch(cfg, primary_node, remote_node,
                                    swap_sz, names[2:4], "sdb")
    return [drbd_sda, drbd_sdb]

  if template_name == constants.DT_FILE:
    # file-backed disks live only on the primary node
    if secondary_nodes:
      raise errors.ProgrammerError("Wrong template configuration")

    file_sda = objects.Disk(dev_type=constants.LD_FILE, size=disk_sz,
                            iv_name="sda",
                            logical_id=(file_driver,
                                        "%s/sda" % file_storage_dir))
    file_sdb = objects.Disk(dev_type=constants.LD_FILE, size=swap_sz,
                            iv_name="sdb",
                            logical_id=(file_driver,
                                        "%s/sdb" % file_storage_dir))
    return [file_sda, file_sdb]

  raise errors.ProgrammerError("Invalid disk template '%s'" % template_name)
def _GetInstanceInfoText(instance):
2902 3ecf6786 Iustin Pop
  """Compute that text that should be added to the disk's metadata.
2903 3ecf6786 Iustin Pop

2904 3ecf6786 Iustin Pop
  """
2905 a0c3fea1 Michael Hanselmann
  return "originstname+%s" % instance.name
2906 a0c3fea1 Michael Hanselmann
2907 a0c3fea1 Michael Hanselmann
2908 a8083063 Iustin Pop
def _CreateDisks(cfg, instance):
  """Create all disks for an instance.

  This abstracts away some work from AddInstance.

  Args:
    instance: the instance object

  Returns:
    True or False showing the success of the creation process

  """
  info = _GetInstanceInfoText(instance)

  if instance.disk_template == constants.DT_FILE:
    # file-backed disks need their per-instance directory to exist on
    # the primary node before the disks themselves can be created
    file_storage_dir = os.path.dirname(instance.disks[0].logical_id[1])
    result = rpc.call_file_storage_dir_create(instance.primary_node,
                                              file_storage_dir)
    if not result:
      logger.Error("Could not connect to node '%s'" % instance.primary_node)
      return False
    if not result[0]:
      logger.Error("failed to create directory '%s'" % file_storage_dir)
      return False

  for disk in instance.disks:
    logger.Info("creating volume %s for instance %s" %
                (disk.iv_name, instance.name))
    # secondaries first, then the primary on top of them
    #HARDCODE
    for snode in instance.secondary_nodes:
      if not _CreateBlockDevOnSecondary(cfg, snode, instance,
                                        disk, False, info):
        logger.Error("failed to create volume %s (%s) on secondary node %s!" %
                     (disk.iv_name, disk, snode))
        return False
    #HARDCODE
    if not _CreateBlockDevOnPrimary(cfg, instance.primary_node,
                                    instance, disk, info):
      logger.Error("failed to create volume %s on primary!" %
                   disk.iv_name)
      return False

  return True
def _RemoveDisks(instance, cfg):
  """Remove all disks for an instance.

  This abstracts away some work from `AddInstance()` and
  `RemoveInstance()`. Note that in case some of the devices couldn't
  be removed, the removal will continue with the other ones (compare
  with `_CreateDisks()`).

  Args:
    instance: the instance object

  Returns:
    True or False showing the success of the removal process

  """
  logger.Info("removing block devices for instance %s" % instance.name)

  # best-effort: keep going after individual failures, but report them
  success = True
  for device in instance.disks:
    for node, disk in device.ComputeNodeTree(instance.primary_node):
      cfg.SetDiskID(disk, node)
      if not rpc.call_blockdev_remove(node, disk):
        logger.Error("could not remove block device %s on node %s,"
                     " continuing anyway" %
                     (device.iv_name, node))
        success = False

  if instance.disk_template == constants.DT_FILE:
    # also drop the per-instance file storage directory
    file_storage_dir = os.path.dirname(instance.disks[0].logical_id[1])
    if not rpc.call_file_storage_dir_remove(instance.primary_node,
                                            file_storage_dir):
      logger.Error("could not remove directory '%s'" % file_storage_dir)
      success = False

  return success
def _ComputeDiskSize(disk_template, disk_size, swap_size):
  """Compute disk size requirements in the volume group

  This is currently hard-coded for the two-drive layout.

  """
  # Required free disk space as a function of disk and swap space
  requirements = {
    constants.DT_DISKLESS: None,
    constants.DT_PLAIN: disk_size + swap_size,
    # 256 MB are added for drbd metadata, 128MB for each drbd device
    constants.DT_DRBD8: disk_size + swap_size + 256,
    constants.DT_FILE: None,
    }

  try:
    return requirements[disk_template]
  except KeyError:
    raise errors.ProgrammerError("Disk template '%s' size requirement"
                                 " is unknown" % disk_template)
class LUCreateInstance(LogicalUnit):
3015 a8083063 Iustin Pop
  """Create an instance.
3016 a8083063 Iustin Pop

3017 a8083063 Iustin Pop
  """
3018 a8083063 Iustin Pop
  HPATH = "instance-add"
3019 a8083063 Iustin Pop
  HTYPE = constants.HTYPE_INSTANCE
3020 538475ca Iustin Pop
  _OP_REQP = ["instance_name", "mem_size", "disk_size",
3021 a8083063 Iustin Pop
              "disk_template", "swap_size", "mode", "start", "vcpus",
3022 1862d460 Alexander Schreiber
              "wait_for_sync", "ip_check", "mac"]
3023 a8083063 Iustin Pop
3024 538475ca Iustin Pop
  def _RunAllocator(self):
3025 538475ca Iustin Pop
    """Run the allocator based on input opcode.
3026 538475ca Iustin Pop

3027 538475ca Iustin Pop
    """
3028 538475ca Iustin Pop
    disks = [{"size": self.op.disk_size, "mode": "w"},
3029 538475ca Iustin Pop
             {"size": self.op.swap_size, "mode": "w"}]
3030 538475ca Iustin Pop
    nics = [{"mac": self.op.mac, "ip": getattr(self.op, "ip", None),
3031 538475ca Iustin Pop
             "bridge": self.op.bridge}]
3032 d1c2dd75 Iustin Pop
    ial = IAllocator(self.cfg, self.sstore,
3033 29859cb7 Iustin Pop
                     mode=constants.IALLOCATOR_MODE_ALLOC,
3034 d1c2dd75 Iustin Pop
                     name=self.op.instance_name,
3035 d1c2dd75 Iustin Pop
                     disk_template=self.op.disk_template,
3036 d1c2dd75 Iustin Pop
                     tags=[],
3037 d1c2dd75 Iustin Pop
                     os=self.op.os_type,
3038 d1c2dd75 Iustin Pop
                     vcpus=self.op.vcpus,
3039 d1c2dd75 Iustin Pop
                     mem_size=self.op.mem_size,
3040 d1c2dd75 Iustin Pop
                     disks=disks,
3041 d1c2dd75 Iustin Pop
                     nics=nics,
3042 29859cb7 Iustin Pop
                     )
3043 d1c2dd75 Iustin Pop
3044 d1c2dd75 Iustin Pop
    ial.Run(self.op.iallocator)
3045 d1c2dd75 Iustin Pop
3046 d1c2dd75 Iustin Pop
    if not ial.success:
3047 538475ca Iustin Pop
      raise errors.OpPrereqError("Can't compute nodes using"
3048 538475ca Iustin Pop
                                 " iallocator '%s': %s" % (self.op.iallocator,
3049 d1c2dd75 Iustin Pop
                                                           ial.info))
3050 27579978 Iustin Pop
    if len(ial.nodes) != ial.required_nodes:
3051 538475ca Iustin Pop
      raise errors.OpPrereqError("iallocator '%s' returned invalid number"
3052 538475ca Iustin Pop
                                 " of nodes (%s), required %s" %
3053 27579978 Iustin Pop
                                 (len(ial.nodes), ial.required_nodes))
3054 d1c2dd75 Iustin Pop
    self.op.pnode = ial.nodes[0]
3055 538475ca Iustin Pop
    logger.ToStdout("Selected nodes for the instance: %s" %
3056 d1c2dd75 Iustin Pop
                    (", ".join(ial.nodes),))
3057 538475ca Iustin Pop
    logger.Info("Selected nodes for instance %s via iallocator %s: %s" %
3058 d1c2dd75 Iustin Pop
                (self.op.instance_name, self.op.iallocator, ial.nodes))
3059 27579978 Iustin Pop
    if ial.required_nodes == 2:
3060 d1c2dd75 Iustin Pop
      self.op.snode = ial.nodes[1]
3061 538475ca Iustin Pop
3062 a8083063 Iustin Pop
  def BuildHooksEnv(self):
3063 a8083063 Iustin Pop
    """Build hooks env.
3064 a8083063 Iustin Pop

3065 a8083063 Iustin Pop
    This runs on master, primary and secondary nodes of the instance.
3066 a8083063 Iustin Pop

3067 a8083063 Iustin Pop
    """
3068 a8083063 Iustin Pop
    env = {
3069 396e1b78 Michael Hanselmann
      "INSTANCE_DISK_TEMPLATE": self.op.disk_template,
3070 396e1b78 Michael Hanselmann
      "INSTANCE_DISK_SIZE": self.op.disk_size,
3071 396e1b78 Michael Hanselmann
      "INSTANCE_SWAP_SIZE": self.op.swap_size,
3072 a8083063 Iustin Pop
      "INSTANCE_ADD_MODE": self.op.mode,
3073 a8083063 Iustin Pop
      }
3074 a8083063 Iustin Pop
    if self.op.mode == constants.INSTANCE_IMPORT:
3075 396e1b78 Michael Hanselmann
      env["INSTANCE_SRC_NODE"] = self.op.src_node
3076 396e1b78 Michael Hanselmann
      env["INSTANCE_SRC_PATH"] = self.op.src_path
3077 396e1b78 Michael Hanselmann
      env["INSTANCE_SRC_IMAGE"] = self.src_image
3078 396e1b78 Michael Hanselmann
3079 396e1b78 Michael Hanselmann
    env.update(_BuildInstanceHookEnv(name=self.op.instance_name,
3080 396e1b78 Michael Hanselmann
      primary_node=self.op.pnode,
3081 396e1b78 Michael Hanselmann
      secondary_nodes=self.secondaries,
3082 396e1b78 Michael Hanselmann
      status=self.instance_status,
3083 ecb215b5 Michael Hanselmann
      os_type=self.op.os_type,
3084 396e1b78 Michael Hanselmann
      memory=self.op.mem_size,
3085 396e1b78 Michael Hanselmann
      vcpus=self.op.vcpus,
3086 c7b27e9e Iustin Pop
      nics=[(self.inst_ip, self.op.bridge, self.op.mac)],
3087 396e1b78 Michael Hanselmann
    ))
3088 a8083063 Iustin Pop
3089 880478f8 Iustin Pop
    nl = ([self.sstore.GetMasterNode(), self.op.pnode] +
3090 a8083063 Iustin Pop
          self.secondaries)
3091 a8083063 Iustin Pop
    return env, nl, nl
3092 a8083063 Iustin Pop
3093 a8083063 Iustin Pop
3094 a8083063 Iustin Pop
  def CheckPrereq(self):
3095 a8083063 Iustin Pop
    """Check prerequisites.
3096 a8083063 Iustin Pop

3097 a8083063 Iustin Pop
    """
3098 538475ca Iustin Pop
    # set optional parameters to none if they don't exist
3099 538475ca Iustin Pop
    for attr in ["kernel_path", "initrd_path", "hvm_boot_order", "pnode",
3100 31a853d2 Iustin Pop
                 "iallocator", "hvm_acpi", "hvm_pae", "hvm_cdrom_image_path",
3101 5397e0b7 Alexander Schreiber
                 "hvm_nic_type", "hvm_disk_type", "vnc_bind_address"]:
3102 40ed12dd Guido Trotter
      if not hasattr(self.op, attr):
3103 40ed12dd Guido Trotter
        setattr(self.op, attr, None)
3104 40ed12dd Guido Trotter
3105 a8083063 Iustin Pop
    if self.op.mode not in (constants.INSTANCE_CREATE,
3106 a8083063 Iustin Pop
                            constants.INSTANCE_IMPORT):
3107 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("Invalid instance creation mode '%s'" %
3108 3ecf6786 Iustin Pop
                                 self.op.mode)
3109 a8083063 Iustin Pop
3110 eedc99de Manuel Franceschini
    if (not self.cfg.GetVGName() and
3111 eedc99de Manuel Franceschini
        self.op.disk_template not in constants.DTS_NOT_LVM):
3112 eedc99de Manuel Franceschini
      raise errors.OpPrereqError("Cluster does not support lvm-based"
3113 eedc99de Manuel Franceschini
                                 " instances")
3114 eedc99de Manuel Franceschini
3115 a8083063 Iustin Pop
    if self.op.mode == constants.INSTANCE_IMPORT:
3116 a8083063 Iustin Pop
      src_node = getattr(self.op, "src_node", None)
3117 a8083063 Iustin Pop
      src_path = getattr(self.op, "src_path", None)
3118 a8083063 Iustin Pop
      if src_node is None or src_path is None:
3119 3ecf6786 Iustin Pop
        raise errors.OpPrereqError("Importing an instance requires source"
3120 3ecf6786 Iustin Pop
                                   " node and path options")
3121 a8083063 Iustin Pop
      src_node_full = self.cfg.ExpandNodeName(src_node)
3122 a8083063 Iustin Pop
      if src_node_full is None:
3123 3ecf6786 Iustin Pop
        raise errors.OpPrereqError("Unknown source node '%s'" % src_node)
3124 a8083063 Iustin Pop
      self.op.src_node = src_node = src_node_full
3125 a8083063 Iustin Pop
3126 a8083063 Iustin Pop
      if not os.path.isabs(src_path):
3127 3ecf6786 Iustin Pop
        raise errors.OpPrereqError("The source path must be absolute")
3128 a8083063 Iustin Pop
3129 a8083063 Iustin Pop
      export_info = rpc.call_export_info(src_node, src_path)
3130 a8083063 Iustin Pop
3131 a8083063 Iustin Pop
      if not export_info:
3132 3ecf6786 Iustin Pop
        raise errors.OpPrereqError("No export found in dir %s" % src_path)
3133 a8083063 Iustin Pop
3134 a8083063 Iustin Pop
      if not export_info.has_section(constants.INISECT_EXP):
3135 3ecf6786 Iustin Pop
        raise errors.ProgrammerError("Corrupted export config")
3136 a8083063 Iustin Pop
3137 a8083063 Iustin Pop
      ei_version = export_info.get(constants.INISECT_EXP, 'version')
3138 a8083063 Iustin Pop
      if (int(ei_version) != constants.EXPORT_VERSION):
3139 3ecf6786 Iustin Pop
        raise errors.OpPrereqError("Wrong export version %s (wanted %d)" %
3140 3ecf6786 Iustin Pop
                                   (ei_version, constants.EXPORT_VERSION))
3141 a8083063 Iustin Pop
3142 a8083063 Iustin Pop
      if int(export_info.get(constants.INISECT_INS, 'disk_count')) > 1:
3143 3ecf6786 Iustin Pop
        raise errors.OpPrereqError("Can't import instance with more than"
3144 3ecf6786 Iustin Pop
                                   " one data disk")
3145 a8083063 Iustin Pop
3146 a8083063 Iustin Pop
      # FIXME: are the old os-es, disk sizes, etc. useful?
3147 a8083063 Iustin Pop
      self.op.os_type = export_info.get(constants.INISECT_EXP, 'os')
3148 a8083063 Iustin Pop
      diskimage = os.path.join(src_path, export_info.get(constants.INISECT_INS,
3149 a8083063 Iustin Pop
                                                         'disk0_dump'))
3150 a8083063 Iustin Pop
      self.src_image = diskimage
3151 a8083063 Iustin Pop
    else: # INSTANCE_CREATE
3152 a8083063 Iustin Pop
      if getattr(self.op, "os_type", None) is None:
3153 3ecf6786 Iustin Pop
        raise errors.OpPrereqError("No guest OS specified")
3154 a8083063 Iustin Pop
3155 901a65c1 Iustin Pop
    #### instance parameters check
3156 901a65c1 Iustin Pop
3157 a8083063 Iustin Pop
    # disk template and mirror node verification
3158 a8083063 Iustin Pop
    if self.op.disk_template not in constants.DISK_TEMPLATES:
3159 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("Invalid disk template name")
3160 a8083063 Iustin Pop
3161 901a65c1 Iustin Pop
    # instance name verification
3162 901a65c1 Iustin Pop
    hostname1 = utils.HostInfo(self.op.instance_name)
3163 901a65c1 Iustin Pop
3164 901a65c1 Iustin Pop
    self.op.instance_name = instance_name = hostname1.name
3165 901a65c1 Iustin Pop
    instance_list = self.cfg.GetInstanceList()
3166 901a65c1 Iustin Pop
    if instance_name in instance_list:
3167 901a65c1 Iustin Pop
      raise errors.OpPrereqError("Instance '%s' is already in the cluster" %
3168 901a65c1 Iustin Pop
                                 instance_name)
3169 901a65c1 Iustin Pop
3170 901a65c1 Iustin Pop
    # ip validity checks
3171 901a65c1 Iustin Pop
    ip = getattr(self.op, "ip", None)
3172 901a65c1 Iustin Pop
    if ip is None or ip.lower() == "none":
3173 901a65c1 Iustin Pop
      inst_ip = None
3174 901a65c1 Iustin Pop
    elif ip.lower() == "auto":
3175 901a65c1 Iustin Pop
      inst_ip = hostname1.ip
3176 901a65c1 Iustin Pop
    else:
3177 901a65c1 Iustin Pop
      if not utils.IsValidIP(ip):
3178 901a65c1 Iustin Pop
        raise errors.OpPrereqError("given IP address '%s' doesn't look"
3179 901a65c1 Iustin Pop
                                   " like a valid IP" % ip)
3180 901a65c1 Iustin Pop
      inst_ip = ip
3181 901a65c1 Iustin Pop
    self.inst_ip = self.op.ip = inst_ip
3182 901a65c1 Iustin Pop
3183 901a65c1 Iustin Pop
    if self.op.start and not self.op.ip_check:
3184 901a65c1 Iustin Pop
      raise errors.OpPrereqError("Cannot ignore IP address conflicts when"
3185 901a65c1 Iustin Pop
                                 " adding an instance in start mode")
3186 901a65c1 Iustin Pop
3187 901a65c1 Iustin Pop
    if self.op.ip_check:
3188 901a65c1 Iustin Pop
      if utils.TcpPing(hostname1.ip, constants.DEFAULT_NODED_PORT):
3189 901a65c1 Iustin Pop
        raise errors.OpPrereqError("IP %s of instance %s already in use" %
3190 901a65c1 Iustin Pop
                                   (hostname1.ip, instance_name))
3191 901a65c1 Iustin Pop
3192 901a65c1 Iustin Pop
    # MAC address verification
3193 901a65c1 Iustin Pop
    if self.op.mac != "auto":
3194 901a65c1 Iustin Pop
      if not utils.IsValidMac(self.op.mac.lower()):
3195 901a65c1 Iustin Pop
        raise errors.OpPrereqError("invalid MAC address specified: %s" %
3196 901a65c1 Iustin Pop
                                   self.op.mac)
3197 901a65c1 Iustin Pop
3198 901a65c1 Iustin Pop
    # bridge verification
3199 901a65c1 Iustin Pop
    bridge = getattr(self.op, "bridge", None)
3200 901a65c1 Iustin Pop
    if bridge is None:
3201 901a65c1 Iustin Pop
      self.op.bridge = self.cfg.GetDefBridge()
3202 901a65c1 Iustin Pop
    else:
3203 901a65c1 Iustin Pop
      self.op.bridge = bridge
3204 901a65c1 Iustin Pop
3205 901a65c1 Iustin Pop
    # boot order verification
3206 901a65c1 Iustin Pop
    if self.op.hvm_boot_order is not None:
3207 901a65c1 Iustin Pop
      if len(self.op.hvm_boot_order.strip("acdn")) != 0:
3208 901a65c1 Iustin Pop
        raise errors.OpPrereqError("invalid boot order specified,"
3209 901a65c1 Iustin Pop
                                   " must be one or more of [acdn]")
3210 901a65c1 Iustin Pop
    # file storage checks
3211 0f1a06e3 Manuel Franceschini
    if (self.op.file_driver and
3212 0f1a06e3 Manuel Franceschini
        not self.op.file_driver in constants.FILE_DRIVER):
3213 0f1a06e3 Manuel Franceschini
      raise errors.OpPrereqError("Invalid file driver name '%s'" %
3214 0f1a06e3 Manuel Franceschini
                                 self.op.file_driver)
3215 0f1a06e3 Manuel Franceschini
3216 0f1a06e3 Manuel Franceschini
    if self.op.file_storage_dir and os.path.isabs(self.op.file_storage_dir):
3217 b4de68a9 Iustin Pop
      raise errors.OpPrereqError("File storage directory not a relative"
3218 b4de68a9 Iustin Pop
                                 " path")
3219 538475ca Iustin Pop
    #### allocator run
3220 538475ca Iustin Pop
3221 538475ca Iustin Pop
    if [self.op.iallocator, self.op.pnode].count(None) != 1:
3222 538475ca Iustin Pop
      raise errors.OpPrereqError("One and only one of iallocator and primary"
3223 538475ca Iustin Pop
                                 " node must be given")
3224 538475ca Iustin Pop
3225 538475ca Iustin Pop
    if self.op.iallocator is not None:
3226 538475ca Iustin Pop
      self._RunAllocator()
3227 0f1a06e3 Manuel Franceschini
3228 901a65c1 Iustin Pop
    #### node related checks
3229 901a65c1 Iustin Pop
3230 901a65c1 Iustin Pop
    # check primary node
3231 901a65c1 Iustin Pop
    pnode = self.cfg.GetNodeInfo(self.cfg.ExpandNodeName(self.op.pnode))
3232 901a65c1 Iustin Pop
    if pnode is None:
3233 901a65c1 Iustin Pop
      raise errors.OpPrereqError("Primary node '%s' is unknown" %
3234 901a65c1 Iustin Pop
                                 self.op.pnode)
3235 901a65c1 Iustin Pop
    self.op.pnode = pnode.name
3236 901a65c1 Iustin Pop
    self.pnode = pnode
3237 901a65c1 Iustin Pop
    self.secondaries = []
3238 901a65c1 Iustin Pop
3239 901a65c1 Iustin Pop
    # mirror node verification
3240 a1f445d3 Iustin Pop
    if self.op.disk_template in constants.DTS_NET_MIRROR:
3241 a8083063 Iustin Pop
      if getattr(self.op, "snode", None) is None:
3242 a1f445d3 Iustin Pop
        raise errors.OpPrereqError("The networked disk templates need"
3243 3ecf6786 Iustin Pop
                                   " a mirror node")
3244 a8083063 Iustin Pop
3245 a8083063 Iustin Pop
      snode_name = self.cfg.ExpandNodeName(self.op.snode)
3246 a8083063 Iustin Pop
      if snode_name is None:
3247 3ecf6786 Iustin Pop
        raise errors.OpPrereqError("Unknown secondary node '%s'" %
3248 3ecf6786 Iustin Pop
                                   self.op.snode)
3249 a8083063 Iustin Pop
      elif snode_name == pnode.name:
3250 3ecf6786 Iustin Pop
        raise errors.OpPrereqError("The secondary node cannot be"
3251 3ecf6786 Iustin Pop
                                   " the primary node.")
3252 a8083063 Iustin Pop
      self.secondaries.append(snode_name)
3253 a8083063 Iustin Pop
3254 e2fe6369 Iustin Pop
    req_size = _ComputeDiskSize(self.op.disk_template,
3255 e2fe6369 Iustin Pop
                                self.op.disk_size, self.op.swap_size)
3256 ed1ebc60 Guido Trotter
3257 8d75db10 Iustin Pop
    # Check lv size requirements
3258 8d75db10 Iustin Pop
    if req_size is not None:
3259 8d75db10 Iustin Pop
      nodenames = [pnode.name] + self.secondaries
3260 8d75db10 Iustin Pop
      nodeinfo = rpc.call_node_info(nodenames, self.cfg.GetVGName())
3261 8d75db10 Iustin Pop
      for node in nodenames:
3262 8d75db10 Iustin Pop
        info = nodeinfo.get(node, None)
3263 8d75db10 Iustin Pop
        if not info:
3264 8d75db10 Iustin Pop
          raise errors.OpPrereqError("Cannot get current information"
3265 3e91897b Iustin Pop
                                     " from node '%s'" % node)
3266 8d75db10 Iustin Pop
        vg_free = info.get('vg_free', None)
3267 8d75db10 Iustin Pop
        if not isinstance(vg_free, int):
3268 8d75db10 Iustin Pop
          raise errors.OpPrereqError("Can't compute free disk space on"
3269 8d75db10 Iustin Pop
                                     " node %s" % node)
3270 8d75db10 Iustin Pop
        if req_size > info['vg_free']:
3271 8d75db10 Iustin Pop
          raise errors.OpPrereqError("Not enough disk space on target node %s."
3272 8d75db10 Iustin Pop
                                     " %d MB available, %d MB required" %
3273 8d75db10 Iustin Pop
                                     (node, info['vg_free'], req_size))
3274 ed1ebc60 Guido Trotter
3275 a8083063 Iustin Pop
    # os verification
3276 00fe9e38 Guido Trotter
    os_obj = rpc.call_os_get(pnode.name, self.op.os_type)
3277 dfa96ded Guido Trotter
    if not os_obj:
3278 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("OS '%s' not in supported os list for"
3279 3ecf6786 Iustin Pop
                                 " primary node"  % self.op.os_type)
3280 a8083063 Iustin Pop
3281 3b6d8c9b Iustin Pop
    if self.op.kernel_path == constants.VALUE_NONE:
3282 3b6d8c9b Iustin Pop
      raise errors.OpPrereqError("Can't set instance kernel to none")
3283 3b6d8c9b Iustin Pop
3284 a8083063 Iustin Pop
3285 901a65c1 Iustin Pop
    # bridge check on primary node
3286 a8083063 Iustin Pop
    if not rpc.call_bridges_exist(self.pnode.name, [self.op.bridge]):
3287 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("target bridge '%s' does not exist on"
3288 3ecf6786 Iustin Pop
                                 " destination node '%s'" %
3289 3ecf6786 Iustin Pop
                                 (self.op.bridge, pnode.name))
3290 a8083063 Iustin Pop
3291 49ce1563 Iustin Pop
    # memory check on primary node
3292 49ce1563 Iustin Pop
    if self.op.start:
3293 49ce1563 Iustin Pop
      _CheckNodeFreeMemory(self.cfg, self.pnode.name,
3294 49ce1563 Iustin Pop
                           "creating instance %s" % self.op.instance_name,
3295 49ce1563 Iustin Pop
                           self.op.mem_size)
3296 49ce1563 Iustin Pop
3297 31a853d2 Iustin Pop
    # hvm_cdrom_image_path verification
3298 31a853d2 Iustin Pop
    if self.op.hvm_cdrom_image_path is not None:
3299 31a853d2 Iustin Pop
      if not os.path.isabs(self.op.hvm_cdrom_image_path):
3300 31a853d2 Iustin Pop
        raise errors.OpPrereqError("The path to the HVM CDROM image must"
3301 31a853d2 Iustin Pop
                                   " be an absolute path or None, not %s" %
3302 31a853d2 Iustin Pop
                                   self.op.hvm_cdrom_image_path)
3303 31a853d2 Iustin Pop
      if not os.path.isfile(self.op.hvm_cdrom_image_path):
3304 31a853d2 Iustin Pop
        raise errors.OpPrereqError("The HVM CDROM image must either be a"
3305 31a853d2 Iustin Pop
                                   " regular file or a symlink pointing to"
3306 31a853d2 Iustin Pop
                                   " an existing regular file, not %s" %
3307 31a853d2 Iustin Pop
                                   self.op.hvm_cdrom_image_path)
3308 31a853d2 Iustin Pop
3309 31a853d2 Iustin Pop
    # vnc_bind_address verification
3310 31a853d2 Iustin Pop
    if self.op.vnc_bind_address is not None:
3311 31a853d2 Iustin Pop
      if not utils.IsValidIP(self.op.vnc_bind_address):
3312 31a853d2 Iustin Pop
        raise errors.OpPrereqError("given VNC bind address '%s' doesn't look"
3313 31a853d2 Iustin Pop
                                   " like a valid IP address" %
3314 31a853d2 Iustin Pop
                                   self.op.vnc_bind_address)
3315 31a853d2 Iustin Pop
3316 5397e0b7 Alexander Schreiber
    # Xen HVM device type checks
3317 5397e0b7 Alexander Schreiber
    if self.sstore.GetHypervisorType() == constants.HT_XEN_HVM31:
3318 5397e0b7 Alexander Schreiber
      if self.op.hvm_nic_type not in constants.HT_HVM_VALID_NIC_TYPES:
3319 5397e0b7 Alexander Schreiber
        raise errors.OpPrereqError("Invalid NIC type %s specified for Xen HVM"
3320 5397e0b7 Alexander Schreiber
                                   " hypervisor" % self.op.hvm_nic_type)
3321 5397e0b7 Alexander Schreiber
      if self.op.hvm_disk_type not in constants.HT_HVM_VALID_DISK_TYPES:
3322 5397e0b7 Alexander Schreiber
        raise errors.OpPrereqError("Invalid disk type %s specified for Xen HVM"
3323 5397e0b7 Alexander Schreiber
                                   " hypervisor" % self.op.hvm_disk_type)
3324 5397e0b7 Alexander Schreiber
3325 a8083063 Iustin Pop
    if self.op.start:
3326 a8083063 Iustin Pop
      self.instance_status = 'up'
3327 a8083063 Iustin Pop
    else:
3328 a8083063 Iustin Pop
      self.instance_status = 'down'
3329 a8083063 Iustin Pop
3330 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
    """Create and add the instance to the cluster.

    Builds the NIC and disk configuration for the new instance,
    registers it in the cluster configuration and the lock manager,
    waits for the disks to sync, runs the OS create/import scripts
    and finally (optionally) starts the instance.

    Args:
      feedback_fn: callable taking one string argument, used to
        report progress messages back to the caller

    Raises:
      errors.OpExecError: if disk creation, disk syncing, OS
        installation/import or the instance start fails
      errors.ProgrammerError: if self.op.mode is not a known
        creation mode (already validated in CheckPrereq)

    """
    instance = self.op.instance_name
    pnode_name = self.pnode.name

    # use a generated MAC unless an explicit one was requested
    if self.op.mac == "auto":
      mac_address = self.cfg.GenerateMAC()
    else:
      mac_address = self.op.mac

    nic = objects.NIC(bridge=self.op.bridge, mac=mac_address)
    if self.inst_ip is not None:
      nic.ip = self.inst_ip

    # some hypervisor types need a cluster-allocated network port
    ht_kind = self.sstore.GetHypervisorType()
    if ht_kind in constants.HTS_REQ_PORT:
      network_port = self.cfg.AllocatePort()
    else:
      network_port = None

    if self.op.vnc_bind_address is None:
      self.op.vnc_bind_address = constants.VNC_DEFAULT_BIND_ADDRESS

    # this is needed because os.path.join does not accept None arguments
    if self.op.file_storage_dir is None:
      string_file_storage_dir = ""
    else:
      string_file_storage_dir = self.op.file_storage_dir

    # build the full file storage dir path
    file_storage_dir = os.path.normpath(os.path.join(
                                        self.sstore.GetFileStorageDir(),
                                        string_file_storage_dir, instance))


    disks = _GenerateDiskTemplate(self.cfg,
                                  self.op.disk_template,
                                  instance, pnode_name,
                                  self.secondaries, self.op.disk_size,
                                  self.op.swap_size,
                                  file_storage_dir,
                                  self.op.file_driver)

    iobj = objects.Instance(name=instance, os=self.op.os_type,
                            primary_node=pnode_name,
                            memory=self.op.mem_size,
                            vcpus=self.op.vcpus,
                            nics=[nic], disks=disks,
                            disk_template=self.op.disk_template,
                            status=self.instance_status,
                            network_port=network_port,
                            kernel_path=self.op.kernel_path,
                            initrd_path=self.op.initrd_path,
                            hvm_boot_order=self.op.hvm_boot_order,
                            hvm_acpi=self.op.hvm_acpi,
                            hvm_pae=self.op.hvm_pae,
                            hvm_cdrom_image_path=self.op.hvm_cdrom_image_path,
                            vnc_bind_address=self.op.vnc_bind_address,
                            hvm_nic_type=self.op.hvm_nic_type,
                            hvm_disk_type=self.op.hvm_disk_type,
                            )

    feedback_fn("* creating instance disks...")
    if not _CreateDisks(self.cfg, iobj):
      # roll back any devices that were created before the failure
      _RemoveDisks(iobj, self.cfg)
      raise errors.OpExecError("Device creation failed, reverting...")

    feedback_fn("adding instance %s to cluster config" % instance)

    self.cfg.AddInstance(iobj)
    # Add the new instance to the Ganeti Lock Manager
    self.context.glm.add(locking.LEVEL_INSTANCE, instance)

    if self.op.wait_for_sync:
      disk_abort = not _WaitForSync(self.cfg, iobj, self.proc)
    elif iobj.disk_template in constants.DTS_NET_MIRROR:
      # make sure the disks are not degraded (still sync-ing is ok)
      time.sleep(15)
      feedback_fn("* checking mirrors status")
      disk_abort = not _WaitForSync(self.cfg, iobj, self.proc, oneshot=True)
    else:
      disk_abort = False

    if disk_abort:
      # degraded disks: undo the disk creation and config registration
      _RemoveDisks(iobj, self.cfg)
      self.cfg.RemoveInstance(iobj.name)
      # Remove the new instance from the Ganeti Lock Manager
      self.context.glm.remove(locking.LEVEL_INSTANCE, iobj.name)
      raise errors.OpExecError("There are some degraded disks for"
                               " this instance")

    feedback_fn("creating os for instance %s on node %s" %
                (instance, pnode_name))

    if iobj.disk_template != constants.DT_DISKLESS:
      if self.op.mode == constants.INSTANCE_CREATE:
        feedback_fn("* running the instance OS create scripts...")
        # NOTE(review): device names "sda"/"sdb" appear to be the fixed
        # data/swap disk names used by the backend scripts - confirm
        if not rpc.call_instance_os_add(pnode_name, iobj, "sda", "sdb"):
          raise errors.OpExecError("could not add os for instance %s"
                                   " on node %s" %
                                   (instance, pnode_name))

      elif self.op.mode == constants.INSTANCE_IMPORT:
        feedback_fn("* running the instance OS import scripts...")
        src_node = self.op.src_node
        src_image = self.src_image
        if not rpc.call_instance_os_import(pnode_name, iobj, "sda", "sdb",
                                                src_node, src_image):
          raise errors.OpExecError("Could not import os for instance"
                                   " %s on node %s" %
                                   (instance, pnode_name))
      else:
        # also checked in the prereq part
        raise errors.ProgrammerError("Unknown OS initialization mode '%s'"
                                     % self.op.mode)

    if self.op.start:
      logger.Info("starting instance %s on node %s" % (instance, pnode_name))
      feedback_fn("* starting instance...")
      if not rpc.call_instance_start(pnode_name, iobj, None):
        raise errors.OpExecError("Could not start instance")
3453 a8083063 Iustin Pop
3454 a8083063 Iustin Pop
3455 a8083063 Iustin Pop
class LUConnectConsole(NoHooksLU):
  """Compute the command needed to attach to an instance's console.

  This LU is somewhat special: instead of acting on the cluster
  remotely, it only computes and returns the ssh command line that
  must be run on the master node to open the instance console.

  """
  _OP_REQP = ["instance_name"]
  REQ_BGL = False

  def ExpandNames(self):
    self._ExpandAndLockInstance()

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster.

    """
    self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
    assert self.instance is not None, \
      "Cannot retrieve locked instance %s" % self.op.instance_name

  def Exec(self, feedback_fn):
    """Connect to the console of an instance

    """
    inst = self.instance
    pnode = inst.primary_node

    # make sure the instance is actually running on its primary node
    running = rpc.call_instance_list([pnode])[pnode]
    if running is False:
      raise errors.OpExecError("Can't connect to node %s." % pnode)
    if inst.name not in running:
      raise errors.OpExecError("Instance %s is not running." % inst.name)

    logger.Debug("connecting to console of %s on %s" % (inst.name, pnode))

    console_cmd = hypervisor.GetHypervisor().GetShellCommandForConsole(inst)

    # hand back the full ssh command line for the master node to execute
    return self.ssh.BuildCmd(pnode, "root", console_cmd, batch=True, tty=True)
3500 a8083063 Iustin Pop
3501 a8083063 Iustin Pop
3502 a8083063 Iustin Pop
class LUReplaceDisks(LogicalUnit):
3503 a8083063 Iustin Pop
  """Replace the disks of an instance.
3504 a8083063 Iustin Pop

3505 a8083063 Iustin Pop
  """
3506 a8083063 Iustin Pop
  HPATH = "mirrors-replace"
3507 a8083063 Iustin Pop
  HTYPE = constants.HTYPE_INSTANCE
3508 a9e0c397 Iustin Pop
  _OP_REQP = ["instance_name", "mode", "disks"]
3509 a8083063 Iustin Pop
3510 b6e82a65 Iustin Pop
  def _RunAllocator(self):
    """Compute a new secondary node using an IAllocator.

    Runs the configured instance allocator in relocation mode for this
    instance and stores the selected node name in self.op.remote_node.

    Raises:
      errors.OpPrereqError: if the allocator run fails or returns an
        unexpected number of nodes

    """
    ial = IAllocator(self.cfg, self.sstore,
                     mode=constants.IALLOCATOR_MODE_RELOC,
                     name=self.op.instance_name,
                     relocate_from=[self.sec_node])

    ial.Run(self.op.iallocator)

    if not ial.success:
      raise errors.OpPrereqError("Can't compute nodes using"
                                 " iallocator '%s': %s" % (self.op.iallocator,
                                                           ial.info))
    if len(ial.nodes) != ial.required_nodes:
      # the format string takes three arguments; previously only two were
      # passed, which raised TypeError instead of the intended error
      raise errors.OpPrereqError("iallocator '%s' returned invalid number"
                                 " of nodes (%s), required %s" %
                                 (self.op.iallocator, len(ial.nodes),
                                  ial.required_nodes))
    self.op.remote_node = ial.nodes[0]
    logger.ToStdout("Selected new secondary for the instance: %s" %
                    self.op.remote_node)
3532 b6e82a65 Iustin Pop
3533 a8083063 Iustin Pop
  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on the master, the primary and all the secondaries.

    """
    # replace-specific variables first; the generic per-instance
    # variables are merged on top of them
    env = {"MODE": self.op.mode,
           "NEW_SECONDARY": self.op.remote_node,
           "OLD_SECONDARY": self.instance.secondary_nodes[0]}
    env.update(_BuildInstanceHookEnvByObject(self.instance))

    run_nodes = [self.sstore.GetMasterNode(), self.instance.primary_node]
    if self.op.remote_node is not None:
      run_nodes.append(self.op.remote_node)

    return env, run_nodes, run_nodes
3552 a8083063 Iustin Pop
3553 a8083063 Iustin Pop
  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster.

    It also verifies that the instance uses a network-mirrored disk
    template, resolves the (optional) new secondary node - given
    explicitly or computed via an iallocator - and, for DRBD8, picks
    the node whose disks will be replaced (self.tgt_node) and the
    other node of the pair (self.oth_node).

    """
    if not hasattr(self.op, "remote_node"):
      self.op.remote_node = None

    instance = self.cfg.GetInstanceInfo(
      self.cfg.ExpandInstanceName(self.op.instance_name))
    if instance is None:
      raise errors.OpPrereqError("Instance '%s' not known" %
                                 self.op.instance_name)
    self.instance = instance
    # normalize to the fully-expanded instance name
    self.op.instance_name = instance.name

    if instance.disk_template not in constants.DTS_NET_MIRROR:
      raise errors.OpPrereqError("Instance's disk layout is not"
                                 " network mirrored.")

    if len(instance.secondary_nodes) != 1:
      raise errors.OpPrereqError("The instance has a strange layout,"
                                 " expected one secondary but found %d" %
                                 len(instance.secondary_nodes))

    self.sec_node = instance.secondary_nodes[0]

    # an iallocator and an explicit secondary are mutually exclusive;
    # the allocator fills in self.op.remote_node itself
    ia_name = getattr(self.op, "iallocator", None)
    if ia_name is not None:
      if self.op.remote_node is not None:
        raise errors.OpPrereqError("Give either the iallocator or the new"
                                   " secondary, not both")
      self._RunAllocator()

    remote_node = self.op.remote_node
    if remote_node is not None:
      remote_node = self.cfg.ExpandNodeName(remote_node)
      if remote_node is None:
        raise errors.OpPrereqError("Node '%s' not known" %
                                   self.op.remote_node)
      self.remote_node_info = self.cfg.GetNodeInfo(remote_node)
    else:
      self.remote_node_info = None
    if remote_node == instance.primary_node:
      raise errors.OpPrereqError("The specified node is the primary node of"
                                 " the instance.")
    elif remote_node == self.sec_node:
      if self.op.mode == constants.REPLACE_DISK_SEC:
        # this is for DRBD8, where we can't execute the same mode of
        # replacement as for drbd7 (no different port allocated)
        raise errors.OpPrereqError("Same secondary given, cannot execute"
                                   " replacement")
    if instance.disk_template == constants.DT_DRBD8:
      if (self.op.mode == constants.REPLACE_DISK_ALL and
          remote_node is not None):
        # switch to replace secondary mode
        self.op.mode = constants.REPLACE_DISK_SEC

      if self.op.mode == constants.REPLACE_DISK_ALL:
        raise errors.OpPrereqError("Template 'drbd' only allows primary or"
                                   " secondary disk replacement, not"
                                   " both at once")
      elif self.op.mode == constants.REPLACE_DISK_PRI:
        if remote_node is not None:
          raise errors.OpPrereqError("Template 'drbd' does not allow changing"
                                     " the secondary while doing a primary"
                                     " node disk replacement")
        self.tgt_node = instance.primary_node
        self.oth_node = instance.secondary_nodes[0]
      elif self.op.mode == constants.REPLACE_DISK_SEC:
        self.new_node = remote_node # this can be None, in which case
                                    # we don't change the secondary
        self.tgt_node = instance.secondary_nodes[0]
        self.oth_node = instance.primary_node
      else:
        raise errors.ProgrammerError("Unhandled disk replace mode")

    # every requested disk must exist on the instance
    for name in self.op.disks:
      if instance.FindDisk(name) is None:
        raise errors.OpPrereqError("Disk '%s' not found for instance '%s'" %
                                   (name, instance.name))
    # store the expanded node name back into the opcode
    self.op.remote_node = remote_node
3636 a8083063 Iustin Pop
3637 a9e0c397 Iustin Pop
  def _ExecD8DiskOnly(self, feedback_fn):
    """Replace a disk on the primary or secondary for dbrd8.

    The algorithm for replace is quite complicated:
      - for each disk to be replaced:
        - create new LVs on the target node with unique names
        - detach old LVs from the drbd device
        - rename old LVs to name_replaced.<time_t>
        - rename new LVs to old LVs
        - attach the new LVs (with the old names now) to the drbd device
      - wait for sync across all devices
      - for each modified disk:
        - remove old LVs (which have the name name_replaces.<time_t>)

    Failures are not very well handled.

    """
    steps_total = 6
    warning, info = (self.proc.LogWarning, self.proc.LogInfo)
    instance = self.instance
    # iv_names maps a disk's iv_name to (disk, old_lvs, new_lvs) so the
    # later sync-check and cleanup steps can find what was replaced
    iv_names = {}
    vgname = self.cfg.GetVGName()
    # start of work
    cfg = self.cfg
    # tgt_node/oth_node were computed in CheckPrereq: target is the node
    # whose local storage is replaced, other is the remaining peer
    tgt_node = self.tgt_node
    oth_node = self.oth_node

    # Step: check device activation
    self.proc.LogStep(1, steps_total, "check device existence")
    info("checking volume groups")
    my_vg = cfg.GetVGName()
    results = rpc.call_vg_list([oth_node, tgt_node])
    if not results:
      raise errors.OpExecError("Can't list volume groups on the nodes")
    for node in oth_node, tgt_node:
      res = results.get(node, False)
      if not res or my_vg not in res:
        raise errors.OpExecError("Volume group '%s' not found on %s" %
                                 (my_vg, node))
    for dev in instance.disks:
      # only the disks selected in the opcode are touched
      if not dev.iv_name in self.op.disks:
        continue
      for node in tgt_node, oth_node:
        info("checking %s on %s" % (dev.iv_name, node))
        cfg.SetDiskID(dev, node)
        if not rpc.call_blockdev_find(node, dev):
          raise errors.OpExecError("Can't find device %s on node %s" %
                                   (dev.iv_name, node))

    # Step: check other node consistency
    # the peer must be healthy, otherwise replacing our copy would lose
    # the only good data
    self.proc.LogStep(2, steps_total, "check peer consistency")
    for dev in instance.disks:
      if not dev.iv_name in self.op.disks:
        continue
      info("checking %s consistency on %s" % (dev.iv_name, oth_node))
      if not _CheckDiskConsistency(self.cfg, dev, oth_node,
                                   oth_node==instance.primary_node):
        raise errors.OpExecError("Peer node (%s) has degraded storage, unsafe"
                                 " to replace disks on this node (%s)" %
                                 (oth_node, tgt_node))

    # Step: create new storage
    self.proc.LogStep(3, steps_total, "allocate new storage")
    for dev in instance.disks:
      if not dev.iv_name in self.op.disks:
        continue
      size = dev.size
      cfg.SetDiskID(dev, tgt_node)
      # one data LV plus one (fixed 128 MiB) metadata LV per disk
      lv_names = [".%s_%s" % (dev.iv_name, suf) for suf in ["data", "meta"]]
      names = _GenerateUniqueNames(cfg, lv_names)
      lv_data = objects.Disk(dev_type=constants.LD_LV, size=size,
                             logical_id=(vgname, names[0]))
      lv_meta = objects.Disk(dev_type=constants.LD_LV, size=128,
                             logical_id=(vgname, names[1]))
      new_lvs = [lv_data, lv_meta]
      old_lvs = dev.children
      iv_names[dev.iv_name] = (dev, old_lvs, new_lvs)
      info("creating new local storage on %s for %s" %
           (tgt_node, dev.iv_name))
      # since we *always* want to create this LV, we use the
      # _Create...OnPrimary (which forces the creation), even if we
      # are talking about the secondary node
      for new_lv in new_lvs:
        if not _CreateBlockDevOnPrimary(cfg, tgt_node, instance, new_lv,
                                        _GetInstanceInfoText(instance)):
          raise errors.OpExecError("Failed to create new LV named '%s' on"
                                   " node '%s'" %
                                   (new_lv.logical_id[1], tgt_node))

    # Step: for each lv, detach+rename*2+attach
    self.proc.LogStep(4, steps_total, "change drbd configuration")
    for dev, old_lvs, new_lvs in iv_names.itervalues():
      info("detaching %s drbd from local storage" % dev.iv_name)
      if not rpc.call_blockdev_removechildren(tgt_node, dev, old_lvs):
        raise errors.OpExecError("Can't detach drbd from local storage on node"
                                 " %s for device %s" % (tgt_node, dev.iv_name))
      #dev.children = []
      #cfg.Update(instance)

      # ok, we created the new LVs, so now we know we have the needed
      # storage; as such, we proceed on the target node to rename
      # old_lv to _old, and new_lv to old_lv; note that we rename LVs
      # using the assumption that logical_id == physical_id (which in
      # turn is the unique_id on that node)

      # FIXME(iustin): use a better name for the replaced LVs
      temp_suffix = int(time.time())
      ren_fn = lambda d, suff: (d.physical_id[0],
                                d.physical_id[1] + "_replaced-%s" % suff)
      # build the rename list based on what LVs exist on the node
      rlist = []
      for to_ren in old_lvs:
        find_res = rpc.call_blockdev_find(tgt_node, to_ren)
        if find_res is not None: # device exists
          rlist.append((to_ren, ren_fn(to_ren, temp_suffix)))

      info("renaming the old LVs on the target node")
      if not rpc.call_blockdev_rename(tgt_node, rlist):
        raise errors.OpExecError("Can't rename old LVs on node %s" % tgt_node)
      # now we rename the new LVs to the old LVs
      info("renaming the new LVs on the target node")
      rlist = [(new, old.physical_id) for old, new in zip(old_lvs, new_lvs)]
      if not rpc.call_blockdev_rename(tgt_node, rlist):
        raise errors.OpExecError("Can't rename new LVs on node %s" % tgt_node)

      # keep the config objects in sync with the on-node renames: the
      # new LVs take over the old logical ids...
      for old, new in zip(old_lvs, new_lvs):
        new.logical_id = old.logical_id
        cfg.SetDiskID(new, tgt_node)

      # ...and the old LVs get the temporary "_replaced-<ts>" ids
      for disk in old_lvs:
        disk.logical_id = ren_fn(disk, temp_suffix)
        cfg.SetDiskID(disk, tgt_node)

      # now that the new lvs have the old name, we can add them to the device
      info("adding new mirror component on %s" % tgt_node)
      if not rpc.call_blockdev_addchildren(tgt_node, dev, new_lvs):
        # attach failed: best-effort rollback by deleting the new LVs
        for new_lv in new_lvs:
          if not rpc.call_blockdev_remove(tgt_node, new_lv):
            warning("Can't rollback device %s", hint="manually cleanup unused"
                    " logical volumes")
        raise errors.OpExecError("Can't add local storage to drbd")

      dev.children = new_lvs
      cfg.Update(instance)

    # Step: wait for sync

    # this can fail as the old devices are degraded and _WaitForSync
    # does a combined result over all disks, so we don't check its
    # return value
    self.proc.LogStep(5, steps_total, "sync devices")
    _WaitForSync(cfg, instance, self.proc, unlock=True)

    # so check manually all the devices
    for name, (dev, old_lvs, new_lvs) in iv_names.iteritems():
      cfg.SetDiskID(dev, instance.primary_node)
      # index 5 of the blockdev_find result is the degraded flag
      # (presumably matching the rpc result layout -- see rpc module)
      is_degr = rpc.call_blockdev_find(instance.primary_node, dev)[5]
      if is_degr:
        raise errors.OpExecError("DRBD device %s is degraded!" % name)

    # Step: remove old storage
    self.proc.LogStep(6, steps_total, "removing old storage")
    for name, (dev, old_lvs, new_lvs) in iv_names.iteritems():
      info("remove logical volumes for %s" % name)
      for lv in old_lvs:
        cfg.SetDiskID(lv, tgt_node)
        # removal failures are non-fatal; the admin is asked to clean up
        if not rpc.call_blockdev_remove(tgt_node, lv):
          warning("Can't remove old LV", hint="manually remove unused LVs")
          continue
  def _ExecD8Secondary(self, feedback_fn):
    """Replace the secondary node for drbd8.

    The algorithm for replace is quite complicated:
      - for all disks of the instance:
        - create new LVs on the new node with same names
        - shutdown the drbd device on the old secondary
        - disconnect the drbd network on the primary
        - create the drbd device on the new secondary
        - network attach the drbd on the primary, using an artifice:
          the drbd code for Attach() will connect to the network if it
          finds a device which is connected to the good local disks but
          not network enabled
      - wait for sync across all devices
      - remove all disks from the old secondary

    Failures are not very well handled.

    """
    steps_total = 6
    warning, info = (self.proc.LogWarning, self.proc.LogInfo)
    instance = self.instance
    # iv_names maps a disk's iv_name to (disk, old_lvs) for the final
    # sync-check and old-storage removal steps
    iv_names = {}
    vgname = self.cfg.GetVGName()
    # start of work
    cfg = self.cfg
    # old_node is the current secondary (set as tgt_node by CheckPrereq),
    # new_node is the replacement secondary requested in the opcode
    old_node = self.tgt_node
    new_node = self.new_node
    pri_node = instance.primary_node

    # Step: check device activation
    self.proc.LogStep(1, steps_total, "check device existence")
    info("checking volume groups")
    my_vg = cfg.GetVGName()
    results = rpc.call_vg_list([pri_node, new_node])
    if not results:
      raise errors.OpExecError("Can't list volume groups on the nodes")
    for node in pri_node, new_node:
      res = results.get(node, False)
      if not res or my_vg not in res:
        raise errors.OpExecError("Volume group '%s' not found on %s" %
                                 (my_vg, node))
    for dev in instance.disks:
      if not dev.iv_name in self.op.disks:
        continue
      info("checking %s on %s" % (dev.iv_name, pri_node))
      cfg.SetDiskID(dev, pri_node)
      if not rpc.call_blockdev_find(pri_node, dev):
        raise errors.OpExecError("Can't find device %s on node %s" %
                                 (dev.iv_name, pri_node))

    # Step: check other node consistency
    # the primary must hold good data (ldisk=True checks the local disk
    # state), since the old secondary is about to be thrown away
    self.proc.LogStep(2, steps_total, "check peer consistency")
    for dev in instance.disks:
      if not dev.iv_name in self.op.disks:
        continue
      info("checking %s consistency on %s" % (dev.iv_name, pri_node))
      if not _CheckDiskConsistency(self.cfg, dev, pri_node, True, ldisk=True):
        raise errors.OpExecError("Primary node (%s) has degraded storage,"
                                 " unsafe to replace the secondary" %
                                 pri_node)

    # Step: create new storage
    # note: from here on *all* disks are processed, not only self.op.disks
    self.proc.LogStep(3, steps_total, "allocate new storage")
    for dev in instance.disks:
      size = dev.size
      info("adding new local storage on %s for %s" % (new_node, dev.iv_name))
      # since we *always* want to create this LV, we use the
      # _Create...OnPrimary (which forces the creation), even if we
      # are talking about the secondary node
      for new_lv in dev.children:
        if not _CreateBlockDevOnPrimary(cfg, new_node, instance, new_lv,
                                        _GetInstanceInfoText(instance)):
          raise errors.OpExecError("Failed to create new LV named '%s' on"
                                   " node '%s'" %
                                   (new_lv.logical_id[1], new_node))

      iv_names[dev.iv_name] = (dev, dev.children)

    self.proc.LogStep(4, steps_total, "changing drbd configuration")
    for dev in instance.disks:
      size = dev.size
      info("activating a new drbd on %s for %s" % (new_node, dev.iv_name))
      # create new devices on new_node
      new_drbd = objects.Disk(dev_type=constants.LD_DRBD8,
                              logical_id=(pri_node, new_node,
                                          dev.logical_id[2]),
                              children=dev.children)
      if not _CreateBlockDevOnSecondary(cfg, new_node, instance,
                                        new_drbd, False,
                                      _GetInstanceInfoText(instance)):
        raise errors.OpExecError("Failed to create new DRBD on"
                                 " node '%s'" % new_node)

    for dev in instance.disks:
      # we have new devices, shutdown the drbd on the old secondary
      info("shutting down drbd for %s on old node" % dev.iv_name)
      cfg.SetDiskID(dev, old_node)
      # shutdown failure is not fatal: the old node's device is stale anyway
      if not rpc.call_blockdev_shutdown(old_node, dev):
        warning("Failed to shutdown drbd for %s on old node" % dev.iv_name,
                hint="Please cleanup this device manually as soon as possible")

    info("detaching primary drbds from the network (=> standalone)")
    done = 0
    for dev in instance.disks:
      cfg.SetDiskID(dev, pri_node)
      # set the physical (unique in bdev terms) id to None, meaning
      # detach from network
      dev.physical_id = (None,) * len(dev.physical_id)
      # and 'find' the device, which will 'fix' it to match the
      # standalone state
      if rpc.call_blockdev_find(pri_node, dev):
        done += 1
      else:
        warning("Failed to detach drbd %s from network, unusual case" %
                dev.iv_name)

    if not done:
      # no detaches succeeded (very unlikely)
      raise errors.OpExecError("Can't detach at least one DRBD from old node")

    # if we managed to detach at least one, we update all the disks of
    # the instance to point to the new secondary
    info("updating instance configuration")
    for dev in instance.disks:
      dev.logical_id = (pri_node, new_node) + dev.logical_id[2:]
      cfg.SetDiskID(dev, pri_node)
    cfg.Update(instance)

    # and now perform the drbd attach
    info("attaching primary drbds to new secondary (standalone => connected)")
    failures = []
    for dev in instance.disks:
      info("attaching primary drbd for %s to new secondary node" % dev.iv_name)
      # since the attach is smart, it's enough to 'find' the device,
      # it will automatically activate the network, if the physical_id
      # is correct
      cfg.SetDiskID(dev, pri_node)
      if not rpc.call_blockdev_find(pri_node, dev):
        warning("can't attach drbd %s to new secondary!" % dev.iv_name,
                "please do a gnt-instance info to see the status of disks")

    # this can fail as the old devices are degraded and _WaitForSync
    # does a combined result over all disks, so we don't check its
    # return value
    self.proc.LogStep(5, steps_total, "sync devices")
    _WaitForSync(cfg, instance, self.proc, unlock=True)

    # so check manually all the devices
    for name, (dev, old_lvs) in iv_names.iteritems():
      cfg.SetDiskID(dev, pri_node)
      # index 5 of the blockdev_find result is the degraded flag
      # (presumably matching the rpc result layout -- see rpc module)
      is_degr = rpc.call_blockdev_find(pri_node, dev)[5]
      if is_degr:
        raise errors.OpExecError("DRBD device %s is degraded!" % name)

    self.proc.LogStep(6, steps_total, "removing old storage")
    for name, (dev, old_lvs) in iv_names.iteritems():
      info("remove logical volumes for %s" % name)
      for lv in old_lvs:
        cfg.SetDiskID(lv, old_node)
        # removal failures are non-fatal; the admin is asked to clean up
        if not rpc.call_blockdev_remove(old_node, lv):
          warning("Can't remove LV on old secondary",
                  hint="Cleanup stale volumes by hand")
  def Exec(self, feedback_fn):
    """Execute disk replacement.

    This dispatches the disk replacement to the appropriate handler.

    """
    inst = self.instance

    # Activate the instance disks if we're replacing them on a down instance
    if inst.status == "down":
      _StartInstanceDisks(self.cfg, inst, True)

    if inst.disk_template != constants.DT_DRBD8:
      raise errors.ProgrammerError("Unhandled disk replacement case")

    # no remote node given means we only replace the local storage;
    # otherwise we migrate the secondary to the given node
    if self.op.remote_node is None:
      handler = self._ExecD8DiskOnly
    else:
      handler = self._ExecD8Secondary

    result = handler(feedback_fn)

    # Deactivate the instance disks if we're replacing them on a down instance
    if inst.status == "down":
      _SafeShutdownInstanceDisks(inst, self.cfg)

    return result
3999 a8083063 Iustin Pop
4000 8729e0d7 Iustin Pop
class LUGrowDisk(LogicalUnit):
  """Grow a disk of an instance.

  """
  HPATH = "disk-grow"
  HTYPE = constants.HTYPE_INSTANCE
  _OP_REQP = ["instance_name", "disk", "amount"]
  REQ_BGL = False

  def ExpandNames(self):
    # grab the instance lock now; the node locks are filled in later,
    # once the instance's node list is known
    self._ExpandAndLockInstance()
    self.needed_locks[locking.LEVEL_NODE] = []
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE

  def DeclareLocks(self, level):
    # lock exactly the nodes of the already-locked instance
    if level == locking.LEVEL_NODE:
      self._LockInstancesNodes()

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on the master, the primary and all the secondaries.

    """
    hook_env = {
      "DISK": self.op.disk,
      "AMOUNT": self.op.amount,
      }
    hook_env.update(_BuildInstanceHookEnvByObject(self.instance))
    run_nodes = [
      self.sstore.GetMasterNode(),
      self.instance.primary_node,
      ]
    return hook_env, run_nodes, run_nodes

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster.

    """
    inst = self.cfg.GetInstanceInfo(self.op.instance_name)
    assert inst is not None, \
      "Cannot retrieve locked instance %s" % self.op.instance_name
    self.instance = inst

    # only lvm-backed templates can be grown
    if inst.disk_template not in (constants.DT_PLAIN, constants.DT_DRBD8):
      raise errors.OpPrereqError("Instance's disk layout does not support"
                                 " growing.")

    if inst.FindDisk(self.op.disk) is None:
      raise errors.OpPrereqError("Disk '%s' not found for instance '%s'" %
                                 (self.op.disk, inst.name))

    # every node holding a copy of the disk must have enough free space
    nodenames = [inst.primary_node] + list(inst.secondary_nodes)
    nodeinfo = rpc.call_node_info(nodenames, self.cfg.GetVGName())
    for node in nodenames:
      info = nodeinfo.get(node, None)
      if not info:
        raise errors.OpPrereqError("Cannot get current information"
                                   " from node '%s'" % node)
      vg_free = info.get('vg_free', None)
      if not isinstance(vg_free, int):
        raise errors.OpPrereqError("Can't compute free disk space on"
                                   " node %s" % node)
      if self.op.amount > info['vg_free']:
        raise errors.OpPrereqError("Not enough disk space on target node %s:"
                                   " %d MiB available, %d MiB required" %
                                   (node, info['vg_free'], self.op.amount))

  def Exec(self, feedback_fn):
    """Execute disk grow.

    """
    inst = self.instance
    disk = inst.FindDisk(self.op.disk)
    # secondaries first, primary last
    for node in inst.secondary_nodes + (inst.primary_node,):
      self.cfg.SetDiskID(disk, node)
      result = rpc.call_blockdev_grow(node, disk, self.op.amount)
      # a well-formed answer is a two-element (status, payload) sequence
      well_formed = (result and isinstance(result, (list, tuple))
                     and len(result) == 2)
      if not well_formed:
        raise errors.OpExecError("grow request failed to node %s" % node)
      if not result[0]:
        raise errors.OpExecError("grow request failed to node %s: %s" %
                                 (node, result[1]))
    disk.RecordGrow(self.op.amount)
    self.cfg.Update(inst)
    return
4090 a8083063 Iustin Pop
class LUQueryInstanceData(NoHooksLU):
  """Query runtime instance data.

  Gathers, for each requested instance (or all instances when the
  "instances" opcode field is empty), its configured and live state,
  its per-disk status tree and hypervisor-specific parameters.

  """
  _OP_REQP = ["instances"]

  def CheckPrereq(self):
    """Check prerequisites.

    This only checks the optional instance list against the existing names.

    """
    if not isinstance(self.op.instances, list):
      raise errors.OpPrereqError("Invalid argument type 'instances'")
    if self.op.instances:
      # explicit list: expand each name and fail on unknown instances
      self.wanted_instances = []
      names = self.op.instances
      for name in names:
        instance = self.cfg.GetInstanceInfo(self.cfg.ExpandInstanceName(name))
        if instance is None:
          raise errors.OpPrereqError("No such instance name '%s'" % name)
        self.wanted_instances.append(instance)
    else:
      # empty list means: query every instance in the cluster
      self.wanted_instances = [self.cfg.GetInstanceInfo(name) for name
                               in self.cfg.GetInstanceList()]
    return

  def _ComputeDiskStatus(self, instance, snode, dev):
    """Compute block device status.

    Recursively builds a dict describing the device `dev`: its live
    status on the primary node and (if any) on the secondary node,
    plus the same information for all its children.

    """
    self.cfg.SetDiskID(dev, instance.primary_node)
    dev_pstatus = rpc.call_blockdev_find(instance.primary_node, dev)
    if dev.dev_type in constants.LDS_DRBD:
      # we change the snode then (otherwise we use the one passed in)
      if dev.logical_id[0] == instance.primary_node:
        snode = dev.logical_id[1]
      else:
        snode = dev.logical_id[0]

    if snode:
      self.cfg.SetDiskID(dev, snode)
      dev_sstatus = rpc.call_blockdev_find(snode, dev)
    else:
      # no secondary node known for this device
      dev_sstatus = None

    if dev.children:
      # recurse into child devices, keeping the (possibly updated) snode
      dev_children = [self._ComputeDiskStatus(instance, snode, child)
                      for child in dev.children]
    else:
      dev_children = []

    data = {
      "iv_name": dev.iv_name,
      "dev_type": dev.dev_type,
      "logical_id": dev.logical_id,
      "physical_id": dev.physical_id,
      "pstatus": dev_pstatus,
      "sstatus": dev_sstatus,
      "children": dev_children,
      }

    return data

  def Exec(self, feedback_fn):
    """Gather and return data"""
    result = {}
    for instance in self.wanted_instances:
      # live state as reported by the primary node's hypervisor;
      # "state" key present in the reply is taken to mean "running"
      remote_info = rpc.call_instance_info(instance.primary_node,
                                                instance.name)
      if remote_info and "state" in remote_info:
        remote_state = "up"
      else:
        remote_state = "down"
      if instance.status == "down":
        config_state = "down"
      else:
        config_state = "up"

      disks = [self._ComputeDiskStatus(instance, None, device)
               for device in instance.disks]

      idict = {
        "name": instance.name,
        "config_state": config_state,
        "run_state": remote_state,
        "pnode": instance.primary_node,
        "snodes": instance.secondary_nodes,
        "os": instance.os,
        "memory": instance.memory,
        "nics": [(nic.mac, nic.ip, nic.bridge) for nic in instance.nics],
        "disks": disks,
        "vcpus": instance.vcpus,
        }

      # hypervisor-specific fields are only added for the cluster's
      # hypervisor type
      htkind = self.sstore.GetHypervisorType()
      if htkind == constants.HT_XEN_PVM30:
        idict["kernel_path"] = instance.kernel_path
        idict["initrd_path"] = instance.initrd_path

      if htkind == constants.HT_XEN_HVM31:
        idict["hvm_boot_order"] = instance.hvm_boot_order
        idict["hvm_acpi"] = instance.hvm_acpi
        idict["hvm_pae"] = instance.hvm_pae
        idict["hvm_cdrom_image_path"] = instance.hvm_cdrom_image_path
        idict["hvm_nic_type"] = instance.hvm_nic_type
        idict["hvm_disk_type"] = instance.hvm_disk_type

      if htkind in constants.HTS_REQ_PORT:
        # compute a human-readable VNC console endpoint; the exact form
        # depends on what address the server is bound to
        if instance.vnc_bind_address is None:
          vnc_bind_address = constants.VNC_DEFAULT_BIND_ADDRESS
        else:
          vnc_bind_address = instance.vnc_bind_address
        if instance.network_port is None:
          vnc_console_port = None
        elif vnc_bind_address == constants.BIND_ADDRESS_GLOBAL:
          # bound on all interfaces: reachable via the primary node name
          vnc_console_port = "%s:%s" % (instance.primary_node,
                                       instance.network_port)
        elif vnc_bind_address == constants.LOCALHOST_IP_ADDRESS:
          # loopback-only: must connect from the node itself
          vnc_console_port = "%s:%s on node %s" % (vnc_bind_address,
                                                   instance.network_port,
                                                   instance.primary_node)
        else:
          vnc_console_port = "%s:%s" % (instance.vnc_bind_address,
                                        instance.network_port)
        idict["vnc_console_port"] = vnc_console_port
        idict["vnc_bind_address"] = vnc_bind_address
        idict["network_port"] = instance.network_port

      result[instance.name] = idict

    return result
4223 a8083063 Iustin Pop
4224 a8083063 Iustin Pop
4225 7767bbf5 Manuel Franceschini
class LUSetInstanceParams(LogicalUnit):
  """Modifies an instances's parameters.

  All changes are recorded in the configuration only; they take effect
  at the next restart of the instance (see Exec's docstring).

  """
  HPATH = "instance-modify"
  HTYPE = constants.HTYPE_INSTANCE
  _OP_REQP = ["instance_name"]
  REQ_BGL = False

  def ExpandNames(self):
    self._ExpandAndLockInstance()

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on the master, primary and secondaries.

    """
    args = dict()
    if self.mem:
      args['memory'] = self.mem
    if self.vcpus:
      args['vcpus'] = self.vcpus
    if self.do_ip or self.do_bridge or self.mac:
      # only the first NIC is configurable; fall back to its current
      # values for the fields not being changed
      if self.do_ip:
        ip = self.ip
      else:
        ip = self.instance.nics[0].ip
      if self.bridge:
        bridge = self.bridge
      else:
        bridge = self.instance.nics[0].bridge
      if self.mac:
        mac = self.mac
      else:
        mac = self.instance.nics[0].mac
      args['nics'] = [(ip, bridge, mac)]
    env = _BuildInstanceHookEnvByObject(self.instance, override=args)
    nl = [self.sstore.GetMasterNode(),
          self.instance.primary_node] + list(self.instance.secondary_nodes)
    return env, nl, nl

  def CheckPrereq(self):
    """Check prerequisites.

    This only checks the instance list against the existing names.

    """
    # FIXME: all the parameters could be checked before, in ExpandNames, or in
    # a separate CheckArguments function, if we implement one, so the operation
    # can be aborted without waiting for any lock, should it have an error...
    self.mem = getattr(self.op, "mem", None)
    self.vcpus = getattr(self.op, "vcpus", None)
    self.ip = getattr(self.op, "ip", None)
    self.mac = getattr(self.op, "mac", None)
    self.bridge = getattr(self.op, "bridge", None)
    self.kernel_path = getattr(self.op, "kernel_path", None)
    self.initrd_path = getattr(self.op, "initrd_path", None)
    self.hvm_boot_order = getattr(self.op, "hvm_boot_order", None)
    self.hvm_acpi = getattr(self.op, "hvm_acpi", None)
    self.hvm_pae = getattr(self.op, "hvm_pae", None)
    self.hvm_nic_type = getattr(self.op, "hvm_nic_type", None)
    self.hvm_disk_type = getattr(self.op, "hvm_disk_type", None)
    self.hvm_cdrom_image_path = getattr(self.op, "hvm_cdrom_image_path", None)
    self.vnc_bind_address = getattr(self.op, "vnc_bind_address", None)
    self.force = getattr(self.op, "force", None)
    # NOTE: self.force is deliberately not part of this list: forcing
    # alone is not a change
    all_parms = [self.mem, self.vcpus, self.ip, self.bridge, self.mac,
                 self.kernel_path, self.initrd_path, self.hvm_boot_order,
                 self.hvm_acpi, self.hvm_pae, self.hvm_cdrom_image_path,
                 self.vnc_bind_address, self.hvm_nic_type, self.hvm_disk_type]
    if all_parms.count(None) == len(all_parms):
      raise errors.OpPrereqError("No changes submitted")
    if self.mem is not None:
      try:
        self.mem = int(self.mem)
      except ValueError, err:
        raise errors.OpPrereqError("Invalid memory size: %s" % str(err))
    if self.vcpus is not None:
      try:
        self.vcpus = int(self.vcpus)
      except ValueError, err:
        raise errors.OpPrereqError("Invalid vcpus number: %s" % str(err))
    if self.ip is not None:
      self.do_ip = True
      # the literal string "none" (any case) clears the IP
      if self.ip.lower() == "none":
        self.ip = None
      else:
        if not utils.IsValidIP(self.ip):
          raise errors.OpPrereqError("Invalid IP address '%s'." % self.ip)
    else:
      self.do_ip = False
    self.do_bridge = (self.bridge is not None)
    if self.mac is not None:
      # NOTE(review): the in-use check runs before the validity check;
      # an invalid MAC that happens to be "in use" reports the wrong error
      if self.cfg.IsMacInUse(self.mac):
        raise errors.OpPrereqError('MAC address %s already in use in cluster' %
                                   self.mac)
      if not utils.IsValidMac(self.mac):
        raise errors.OpPrereqError('Invalid MAC address %s' % self.mac)

    if self.kernel_path is not None:
      self.do_kernel_path = True
      if self.kernel_path == constants.VALUE_NONE:
        raise errors.OpPrereqError("Can't set instance to no kernel")

      if self.kernel_path != constants.VALUE_DEFAULT:
        if not os.path.isabs(self.kernel_path):
          raise errors.OpPrereqError("The kernel path must be an absolute"
                                    " filename")
    else:
      self.do_kernel_path = False

    if self.initrd_path is not None:
      self.do_initrd_path = True
      # unlike the kernel, the initrd may be set to "none"
      if self.initrd_path not in (constants.VALUE_NONE,
                                  constants.VALUE_DEFAULT):
        if not os.path.isabs(self.initrd_path):
          raise errors.OpPrereqError("The initrd path must be an absolute"
                                    " filename")
    else:
      self.do_initrd_path = False

    # boot order verification
    if self.hvm_boot_order is not None:
      if self.hvm_boot_order != constants.VALUE_DEFAULT:
        # only the characters a, c, d, n are valid boot devices
        if len(self.hvm_boot_order.strip("acdn")) != 0:
          raise errors.OpPrereqError("invalid boot order specified,"
                                     " must be one or more of [acdn]"
                                     " or 'default'")

    # hvm_cdrom_image_path verification
    if self.op.hvm_cdrom_image_path is not None:
      # NOTE(review): os.path.isfile runs on the master node here;
      # presumably the image path is expected to be valid there — confirm
      if not (os.path.isabs(self.op.hvm_cdrom_image_path) or
              self.op.hvm_cdrom_image_path.lower() == "none"):
        raise errors.OpPrereqError("The path to the HVM CDROM image must"
                                   " be an absolute path or None, not %s" %
                                   self.op.hvm_cdrom_image_path)
      if not (os.path.isfile(self.op.hvm_cdrom_image_path) or
              self.op.hvm_cdrom_image_path.lower() == "none"):
        raise errors.OpPrereqError("The HVM CDROM image must either be a"
                                   " regular file or a symlink pointing to"
                                   " an existing regular file, not %s" %
                                   self.op.hvm_cdrom_image_path)

    # vnc_bind_address verification
    if self.op.vnc_bind_address is not None:
      if not utils.IsValidIP(self.op.vnc_bind_address):
        raise errors.OpPrereqError("given VNC bind address '%s' doesn't look"
                                   " like a valid IP address" %
                                   self.op.vnc_bind_address)

    instance = self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
    assert self.instance is not None, \
      "Cannot retrieve locked instance %s" % self.op.instance_name
    # non-fatal issues are collected here and emitted by Exec via
    # feedback_fn
    self.warn = []
    if self.mem is not None and not self.force:
      # memory increase: verify there is enough free memory on the
      # primary (fatal) and on the secondaries (warning only)
      pnode = self.instance.primary_node
      nodelist = [pnode]
      nodelist.extend(instance.secondary_nodes)
      instance_info = rpc.call_instance_info(pnode, instance.name)
      nodeinfo = rpc.call_node_info(nodelist, self.cfg.GetVGName())

      if pnode not in nodeinfo or not isinstance(nodeinfo[pnode], dict):
        # Assume the primary node is unreachable and go ahead
        self.warn.append("Can't get info from primary node %s" % pnode)
      else:
        if instance_info:
          current_mem = instance_info['memory']
        else:
          # Assume instance not running
          # (there is a slight race condition here, but it's not very probable,
          # and we have no other way to check)
          current_mem = 0
        miss_mem = self.mem - current_mem - nodeinfo[pnode]['memory_free']
        if miss_mem > 0:
          raise errors.OpPrereqError("This change will prevent the instance"
                                     " from starting, due to %d MB of memory"
                                     " missing on its primary node" % miss_mem)

      for node in instance.secondary_nodes:
        if node not in nodeinfo or not isinstance(nodeinfo[node], dict):
          self.warn.append("Can't get info from secondary node %s" % node)
        elif self.mem > nodeinfo[node]['memory_free']:
          self.warn.append("Not enough memory to failover instance to secondary"
                           " node %s" % node)

    # Xen HVM device type checks
    if self.sstore.GetHypervisorType() == constants.HT_XEN_HVM31:
      if self.op.hvm_nic_type is not None:
        if self.op.hvm_nic_type not in constants.HT_HVM_VALID_NIC_TYPES:
          raise errors.OpPrereqError("Invalid NIC type %s specified for Xen"
                                     " HVM  hypervisor" % self.op.hvm_nic_type)
      if self.op.hvm_disk_type is not None:
        if self.op.hvm_disk_type not in constants.HT_HVM_VALID_DISK_TYPES:
          raise errors.OpPrereqError("Invalid disk type %s specified for Xen"
                                     " HVM hypervisor" % self.op.hvm_disk_type)

    return

  def Exec(self, feedback_fn):
    """Modifies an instance.

    All parameters take effect only at the next restart of the instance.
    """
    # Process here the warnings from CheckPrereq, as we don't have a
    # feedback_fn there.
    for warn in self.warn:
      feedback_fn("WARNING: %s" % warn)

    # list of (parameter, new value) pairs returned to the caller
    result = []
    instance = self.instance
    if self.mem:
      instance.memory = self.mem
      result.append(("mem", self.mem))
    if self.vcpus:
      instance.vcpus = self.vcpus
      result.append(("vcpus",  self.vcpus))
    if self.do_ip:
      instance.nics[0].ip = self.ip
      result.append(("ip", self.ip))
    if self.bridge:
      instance.nics[0].bridge = self.bridge
      result.append(("bridge", self.bridge))
    if self.mac:
      instance.nics[0].mac = self.mac
      result.append(("mac", self.mac))
    if self.do_kernel_path:
      instance.kernel_path = self.kernel_path
      result.append(("kernel_path", self.kernel_path))
    if self.do_initrd_path:
      instance.initrd_path = self.initrd_path
      result.append(("initrd_path", self.initrd_path))
    if self.hvm_boot_order:
      # "default" translates to clearing the per-instance override
      if self.hvm_boot_order == constants.VALUE_DEFAULT:
        instance.hvm_boot_order = None
      else:
        instance.hvm_boot_order = self.hvm_boot_order
      result.append(("hvm_boot_order", self.hvm_boot_order))
    if self.hvm_acpi is not None:
      instance.hvm_acpi = self.hvm_acpi
      result.append(("hvm_acpi", self.hvm_acpi))
    if self.hvm_pae is not None:
      instance.hvm_pae = self.hvm_pae
      result.append(("hvm_pae", self.hvm_pae))
    if self.hvm_nic_type is not None:
      instance.hvm_nic_type = self.hvm_nic_type
      result.append(("hvm_nic_type", self.hvm_nic_type))
    if self.hvm_disk_type is not None:
      instance.hvm_disk_type = self.hvm_disk_type
      result.append(("hvm_disk_type", self.hvm_disk_type))
    if self.hvm_cdrom_image_path:
      # VALUE_NONE clears the CDROM image
      if self.hvm_cdrom_image_path == constants.VALUE_NONE:
        instance.hvm_cdrom_image_path = None
      else:
        instance.hvm_cdrom_image_path = self.hvm_cdrom_image_path
      result.append(("hvm_cdrom_image_path", self.hvm_cdrom_image_path))
    if self.vnc_bind_address:
      instance.vnc_bind_address = self.vnc_bind_address
      result.append(("vnc_bind_address", self.vnc_bind_address))

    self.cfg.Update(instance)

    return result
4487 a8083063 Iustin Pop
4488 a8083063 Iustin Pop
4489 a8083063 Iustin Pop
class LUQueryExports(NoHooksLU):
  """Query the exports list

  """
  _OP_REQP = ['nodes']
  REQ_BGL = False

  def ExpandNames(self):
    """Compute the needed locks.

    Node locks are taken in shared mode, either for the requested
    nodes or for the whole cluster when no nodes were given.

    """
    self.needed_locks = {}
    self.share_locks[locking.LEVEL_NODE] = 1
    if self.op.nodes:
      wanted = _GetWantedNodes(self, self.op.nodes)
    else:
      wanted = locking.ALL_SET
    self.needed_locks[locking.LEVEL_NODE] = wanted

  def CheckPrereq(self):
    """Check prerequisites.

    The node list to query is simply whatever node locks were acquired.

    """
    self.nodes = self.acquired_locks[locking.LEVEL_NODE]

  def Exec(self, feedback_fn):
    """Compute the list of all the exported system images.

    Returns:
      a dictionary with the structure node->(export-list)
      where export-list is a list of the instances exported on
      that node.

    """
    return rpc.call_export_list(self.nodes)
4521 a8083063 Iustin Pop
4522 a8083063 Iustin Pop
4523 a8083063 Iustin Pop
class LUExportInstance(LogicalUnit):
  """Export an instance to an image in the cluster.

  The export is built by snapshotting the instance's disk, copying the
  snapshot to the target node and finalizing it there; any older export
  of the same instance found on other nodes is removed afterwards.

  """
  HPATH = "instance-export"
  HTYPE = constants.HTYPE_INSTANCE
  _OP_REQP = ["instance_name", "target_node", "shutdown"]
  REQ_BGL = False

  def ExpandNames(self):
    self._ExpandAndLockInstance()
    # FIXME: lock only instance primary and destination node
    #
    # Sad but true, for now we have to lock all nodes, as we don't know
    # where the previous export might be, and in this LU we search for it
    # and remove it from its current node. In the future we could fix this
    # by:
    #  - making a tasklet to search (share-lock all), then create the new
    #    one, then one to remove, after
    #  - removing the removal operation altogether
    self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET

  def DeclareLocks(self, level):
    """Last minute lock declaration."""
    # All nodes are locked anyway, so nothing to do here.

  def BuildHooksEnv(self):
    """Build hooks env.

    This will run on the master, primary node and target node.

    """
    env = {
      "EXPORT_NODE": self.op.target_node,
      "EXPORT_DO_SHUTDOWN": self.op.shutdown,
      }
    env.update(_BuildInstanceHookEnvByObject(self.instance))
    nl = [self.sstore.GetMasterNode(), self.instance.primary_node,
          self.op.target_node]
    return env, nl, nl

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance and node names are valid.

    """
    instance_name = self.op.instance_name
    self.instance = self.cfg.GetInstanceInfo(instance_name)
    # instance was already locked in ExpandNames, so it must exist
    assert self.instance is not None, \
          "Cannot retrieve locked instance %s" % self.op.instance_name

    self.dst_node = self.cfg.GetNodeInfo(
      self.cfg.ExpandNodeName(self.op.target_node))

    assert self.dst_node is not None, \
          "Cannot retrieve locked node %s" % self.op.target_node

    # instance disk type verification: file-based disks cannot be
    # snapshotted via LVM, so exporting them is not supported
    for disk in self.instance.disks:
      if disk.dev_type == constants.LD_FILE:
        raise errors.OpPrereqError("Export not supported for instances with"
                                   " file-based disks")

  def Exec(self, feedback_fn):
    """Export an instance to an image in the cluster.

    """
    instance = self.instance
    dst_node = self.dst_node
    src_node = instance.primary_node
    if self.op.shutdown:
      # shutdown the instance, but not the disks; the disks stay
      # activated so they can be snapshotted below
      if not rpc.call_instance_shutdown(src_node, instance):
        raise errors.OpExecError("Could not shutdown instance %s on node %s" %
                                 (instance.name, src_node))

    vgname = self.cfg.GetVGName()

    # disk objects describing the LVM snapshots taken on the source node
    snap_disks = []

    try:
      # NOTE(review): only the "sda" disk is snapshotted/exported here;
      # other disks are skipped — presumably a limitation of this version
      for disk in instance.disks:
        if disk.iv_name == "sda":
          # new_dev_name will be a snapshot of an lvm leaf of the one we passed
          new_dev_name = rpc.call_blockdev_snapshot(src_node, disk)

          if not new_dev_name:
            # snapshot failure is logged but not fatal; the export
            # proceeds with whatever snapshots succeeded
            logger.Error("could not snapshot block device %s on node %s" %
                         (disk.logical_id[1], src_node))
          else:
            new_dev = objects.Disk(dev_type=constants.LD_LV, size=disk.size,
                                      logical_id=(vgname, new_dev_name),
                                      physical_id=(vgname, new_dev_name),
                                      iv_name=disk.iv_name)
            snap_disks.append(new_dev)

    finally:
      # restart the instance as soon as the snapshot is taken, so the
      # downtime is limited to the snapshot operation only
      if self.op.shutdown and instance.status == "up":
        if not rpc.call_instance_start(src_node, instance, None):
          _ShutdownInstanceDisks(instance, self.cfg)
          raise errors.OpExecError("Could not start instance")

    # TODO: check for size

    # copy each snapshot to the target node and then drop it from the
    # source node; failures are logged but do not abort the export
    for dev in snap_disks:
      if not rpc.call_snapshot_export(src_node, dev, dst_node.name, instance):
        logger.Error("could not export block device %s from node %s to node %s"
                     % (dev.logical_id[1], src_node, dst_node.name))
      if not rpc.call_blockdev_remove(src_node, dev):
        logger.Error("could not remove snapshot block device %s from node %s" %
                     (dev.logical_id[1], src_node))

    if not rpc.call_finalize_export(dst_node.name, instance, snap_disks):
      logger.Error("could not finalize export for instance %s on node %s" %
                   (instance.name, dst_node.name))

    # remove any stale export of this instance from all other nodes
    nodelist = self.cfg.GetNodeList()
    nodelist.remove(dst_node.name)

    # on one-node clusters nodelist will be empty after the removal
    # if we proceed the backup would be removed because OpQueryExports
    # substitutes an empty list with the full cluster node list.
    if nodelist:
      exportlist = rpc.call_export_list(nodelist)
      for node in exportlist:
        if instance.name in exportlist[node]:
          if not rpc.call_export_remove(node, instance.name):
            logger.Error("could not remove older export for instance %s"
                         " on node %s" % (instance.name, node))
4654 9ac99fda Guido Trotter
class LURemoveExport(NoHooksLU):
  """Remove exports related to the named instance.

  """
  _OP_REQP = ["instance_name"]

  def CheckPrereq(self):
    """Check prerequisites.
    """
    pass

  def Exec(self, feedback_fn):
    """Remove any export.

    """
    iname = self.cfg.ExpandInstanceName(self.op.instance_name)
    # The instance may be gone from the configuration already; fall back
    # to the name exactly as given, which only works for an FQDN.
    if iname:
      fqdn_warn = False
    else:
      fqdn_warn = True
      iname = self.op.instance_name

    found = False
    exports = rpc.call_export_list(self.cfg.GetNodeList())
    for node in exports:
      if iname not in exports[node]:
        continue
      found = True
      if not rpc.call_export_remove(node, iname):
        logger.Error("could not remove export for instance %s"
                     " on node %s" % (iname, node))

    if fqdn_warn and not found:
      feedback_fn("Export not found. If trying to remove an export belonging"
                  " to a deleted instance please use its Fully Qualified"
                  " Domain Name.")
4692 5c947f38 Iustin Pop
class TagsLU(NoHooksLU):
  """Generic tags LU.

  This is an abstract class which is the parent of all the other tags LUs.

  """
  def CheckPrereq(self):
    """Check prerequisites.

    Resolves self.op.kind/self.op.name into self.target, the object whose
    tags are to be read or modified.

    """
    kind = self.op.kind
    if kind == constants.TAG_CLUSTER:
      self.target = self.cfg.GetClusterInfo()
    elif kind == constants.TAG_NODE:
      expanded = self.cfg.ExpandNodeName(self.op.name)
      if expanded is None:
        raise errors.OpPrereqError("Invalid node name (%s)" %
                                   (self.op.name,))
      # store the canonical name back into the opcode
      self.op.name = expanded
      self.target = self.cfg.GetNodeInfo(expanded)
    elif kind == constants.TAG_INSTANCE:
      expanded = self.cfg.ExpandInstanceName(self.op.name)
      if expanded is None:
        raise errors.OpPrereqError("Invalid instance name (%s)" %
                                   (self.op.name,))
      self.op.name = expanded
      self.target = self.cfg.GetInstanceInfo(expanded)
    else:
      raise errors.OpPrereqError("Wrong tag type requested (%s)" %
                                 str(self.op.kind))
4723 5c947f38 Iustin Pop
class LUGetTags(TagsLU):
  """Returns the tags of a given object.

  """
  _OP_REQP = ["kind", "name"]

  def Exec(self, feedback_fn):
    """Returns the tag list.

    """
    # self.target was resolved by TagsLU.CheckPrereq
    tags = self.target.GetTags()
    return list(tags)
4735 5c947f38 Iustin Pop
4736 73415719 Iustin Pop
class LUSearchTags(NoHooksLU):
4737 73415719 Iustin Pop
  """Searches the tags for a given pattern.
4738 73415719 Iustin Pop

4739 73415719 Iustin Pop
  """
4740 73415719 Iustin Pop
  _OP_REQP = ["pattern"]
4741 73415719 Iustin Pop
4742 73415719 Iustin Pop
  def CheckPrereq(self):
4743 73415719 Iustin Pop
    """Check prerequisites.
4744 73415719 Iustin Pop

4745 73415719 Iustin Pop
    This checks the pattern passed for validity by compiling it.
4746 73415719 Iustin Pop

4747 73415719 Iustin Pop
    """
4748 73415719 Iustin Pop
    try:
4749 73415719 Iustin Pop
      self.re = re.compile(self.op.pattern)
4750 73415719 Iustin Pop
    except re.error, err:
4751 73415719 Iustin Pop
      raise errors.OpPrereqError("Invalid search pattern '%s': %s" %
4752 73415719 Iustin Pop
                                 (self.op.pattern, err))
4753 73415719 Iustin Pop
4754 73415719 Iustin Pop
  def Exec(self, feedback_fn):
4755 73415719 Iustin Pop
    """Returns the tag list.
4756 73415719 Iustin Pop

4757 73415719 Iustin Pop
    """
4758 73415719 Iustin Pop
    cfg = self.cfg
4759 73415719 Iustin Pop
    tgts = [("/cluster", cfg.GetClusterInfo())]
4760 73415719 Iustin Pop
    ilist = [cfg.GetInstanceInfo(name) for name in cfg.GetInstanceList()]
4761 73415719 Iustin Pop
    tgts.extend([("/instances/%s" % i.name, i) for i in ilist])
4762 73415719 Iustin Pop
    nlist = [cfg.GetNodeInfo(name) for name in cfg.GetNodeList()]
4763 73415719 Iustin Pop
    tgts.extend([("/nodes/%s" % n.name, n) for n in nlist])
4764 73415719 Iustin Pop
    results = []
4765 73415719 Iustin Pop
    for path, target in tgts:
4766 73415719 Iustin Pop
      for tag in target.GetTags():
4767 73415719 Iustin Pop
        if self.re.search(tag):
4768 73415719 Iustin Pop
          results.append((path, tag))
4769 73415719 Iustin Pop
    return results
4770 73415719 Iustin Pop
4771 73415719 Iustin Pop
4772 f27302fa Iustin Pop
class LUAddTags(TagsLU):
4773 5c947f38 Iustin Pop
  """Sets a tag on a given object.
4774 5c947f38 Iustin Pop

4775 5c947f38 Iustin Pop
  """
4776 f27302fa Iustin Pop
  _OP_REQP = ["kind", "name", "tags"]
4777 5c947f38 Iustin Pop
4778 5c947f38 Iustin Pop
  def CheckPrereq(self):
4779 5c947f38 Iustin Pop
    """Check prerequisites.
4780 5c947f38 Iustin Pop

4781 5c947f38 Iustin Pop
    This checks the type and length of the tag name and value.
4782 5c947f38 Iustin Pop

4783 5c947f38 Iustin Pop
    """
4784 5c947f38 Iustin Pop
    TagsLU.CheckPrereq(self)
4785 f27302fa Iustin Pop
    for tag in self.op.tags:
4786 f27302fa Iustin Pop
      objects.TaggableObject.ValidateTag(tag)
4787 5c947f38 Iustin Pop
4788 5c947f38 Iustin Pop
  def Exec(self, feedback_fn):
4789 5c947f38 Iustin Pop
    """Sets the tag.
4790 5c947f38 Iustin Pop

4791 5c947f38 Iustin Pop
    """
4792 5c947f38 Iustin Pop
    try:
4793 f27302fa Iustin Pop
      for tag in self.op.tags:
4794 f27302fa Iustin Pop
        self.target.AddTag(tag)
4795 5c947f38 Iustin Pop
    except errors.TagError, err:
4796 3ecf6786 Iustin Pop
      raise errors.OpExecError("Error while setting tag: %s" % str(err))
4797 5c947f38 Iustin Pop
    try:
4798 5c947f38 Iustin Pop
      self.cfg.Update(self.target)
4799 5c947f38 Iustin Pop
    except errors.ConfigurationError:
4800 3ecf6786 Iustin Pop
      raise errors.OpRetryError("There has been a modification to the"
4801 3ecf6786 Iustin Pop
                                " config file and the operation has been"
4802 3ecf6786 Iustin Pop
                                " aborted. Please retry.")
4803 5c947f38 Iustin Pop
4804 5c947f38 Iustin Pop
4805 f27302fa Iustin Pop
class LUDelTags(TagsLU):
  """Delete a list of tags from a given object.

  """
  _OP_REQP = ["kind", "name", "tags"]

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that we have the given tag.

    """
    TagsLU.CheckPrereq(self)
    for del_tag in self.op.tags:
      objects.TaggableObject.ValidateTag(del_tag)
    # every tag to delete must currently be present on the target
    missing = frozenset(self.op.tags) - self.target.GetTags()
    if missing:
      diff_names = sorted("'%s'" % tag for tag in missing)
      raise errors.OpPrereqError("Tag(s) %s not found" %
                                 (",".join(diff_names)))

  def Exec(self, feedback_fn):
    """Remove the tag from the object.

    """
    target = self.target
    for del_tag in self.op.tags:
      target.RemoveTag(del_tag)
    # persist the modified object; a concurrent modification makes the
    # update fail and the whole operation must be retried
    try:
      self.cfg.Update(target)
    except errors.ConfigurationError:
      raise errors.OpRetryError("There has been a modification to the"
                                " config file and the operation has been"
                                " aborted. Please retry.")
4842 0eed6e61 Guido Trotter
4843 06009e27 Iustin Pop
class LUTestDelay(NoHooksLU):
  """Sleep for a specified amount of time.

  This LU sleeps on the master and/or nodes for a specified amount of
  time.

  """
  _OP_REQP = ["duration", "on_master", "on_nodes"]
  REQ_BGL = False

  def ExpandNames(self):
    """Expand names and set required locks.

    This expands the node list, if any.

    """
    self.needed_locks = {}
    if self.op.on_nodes:
      # _GetWantedNodes can be used here, but is not always appropriate to use
      # this way in ExpandNames. Check LogicalUnit.ExpandNames docstring for
      # more information.
      self.op.on_nodes = _GetWantedNodes(self, self.op.on_nodes)
      self.needed_locks[locking.LEVEL_NODE] = self.op.on_nodes

  def CheckPrereq(self):
    """Check prerequisites.

    """

  def Exec(self, feedback_fn):
    """Do the actual sleep.

    """
    duration = self.op.duration
    # first the local (master) delay, then the remote ones
    if self.op.on_master and not utils.TestDelay(duration):
      raise errors.OpExecError("Error during master delay test")
    if self.op.on_nodes:
      results = rpc.call_test_delay(self.op.on_nodes, duration)
      if not results:
        raise errors.OpExecError("Complete failure from rpc call")
      for node, node_result in results.items():
        if not node_result:
          raise errors.OpExecError("Failure during rpc call to node %s,"
                                   " result: %s" % (node, node_result))
4888 d61df03e Iustin Pop
4889 d1c2dd75 Iustin Pop
class IAllocator(object):
4890 d1c2dd75 Iustin Pop
  """IAllocator framework.
4891 d61df03e Iustin Pop

4892 d1c2dd75 Iustin Pop
  An IAllocator instance has three sets of attributes:
4893 d1c2dd75 Iustin Pop
    - cfg/sstore that are needed to query the cluster
4894 d1c2dd75 Iustin Pop
    - input data (all members of the _KEYS class attribute are required)
4895 d1c2dd75 Iustin Pop
    - four buffer attributes (in|out_data|text), that represent the
4896 d1c2dd75 Iustin Pop
      input (to the external script) in text and data structure format,
4897 d1c2dd75 Iustin Pop
      and the output from it, again in two formats
4898 d1c2dd75 Iustin Pop
    - the result variables from the script (success, info, nodes) for
4899 d1c2dd75 Iustin Pop
      easy usage
4900 d61df03e Iustin Pop

4901 d61df03e Iustin Pop
  """
4902 29859cb7 Iustin Pop
  _ALLO_KEYS = [
4903 d1c2dd75 Iustin Pop
    "mem_size", "disks", "disk_template",
4904 d1c2dd75 Iustin Pop
    "os", "tags", "nics", "vcpus",
4905 d1c2dd75 Iustin Pop
    ]
4906 29859cb7 Iustin Pop
  _RELO_KEYS = [
4907 29859cb7 Iustin Pop
    "relocate_from",
4908 29859cb7 Iustin Pop
    ]
4909 d1c2dd75 Iustin Pop
4910 29859cb7 Iustin Pop
  def __init__(self, cfg, sstore, mode, name, **kwargs):
4911 d1c2dd75 Iustin Pop
    self.cfg = cfg
4912 d1c2dd75 Iustin Pop
    self.sstore = sstore
4913 d1c2dd75 Iustin Pop
    # init buffer variables
4914 d1c2dd75 Iustin Pop
    self.in_text = self.out_text = self.in_data = self.out_data = None
4915 d1c2dd75 Iustin Pop
    # init all input fields so that pylint is happy
4916 29859cb7 Iustin Pop
    self.mode = mode
4917 29859cb7 Iustin Pop
    self.name = name
4918 d1c2dd75 Iustin Pop
    self.mem_size = self.disks = self.disk_template = None
4919 d1c2dd75 Iustin Pop
    self.os = self.tags = self.nics = self.vcpus = None
4920 29859cb7 Iustin Pop
    self.relocate_from = None
4921 27579978 Iustin Pop
    # computed fields
4922 27579978 Iustin Pop
    self.required_nodes = None
4923 d1c2dd75 Iustin Pop
    # init result fields
4924 d1c2dd75 Iustin Pop
    self.success = self.info = self.nodes = None
4925 29859cb7 Iustin Pop
    if self.mode == constants.IALLOCATOR_MODE_ALLOC:
4926 29859cb7 Iustin Pop
      keyset = self._ALLO_KEYS
4927 29859cb7 Iustin Pop
    elif self.mode == constants.IALLOCATOR_MODE_RELOC:
4928 29859cb7 Iustin Pop
      keyset = self._RELO_KEYS
4929 29859cb7 Iustin Pop
    else:
4930 29859cb7 Iustin Pop
      raise errors.ProgrammerError("Unknown mode '%s' passed to the"
4931 29859cb7 Iustin Pop
                                   " IAllocator" % self.mode)
4932 d1c2dd75 Iustin Pop
    for key in kwargs:
4933 29859cb7 Iustin Pop
      if key not in keyset:
4934 d1c2dd75 Iustin Pop
        raise errors.ProgrammerError("Invalid input parameter '%s' to"
4935 d1c2dd75 Iustin Pop
                                     " IAllocator" % key)
4936 d1c2dd75 Iustin Pop
      setattr(self, key, kwargs[key])
4937 29859cb7 Iustin Pop
    for key in keyset:
4938 d1c2dd75 Iustin Pop
      if key not in kwargs:
4939 d1c2dd75 Iustin Pop
        raise errors.ProgrammerError("Missing input parameter '%s' to"
4940 d1c2dd75 Iustin Pop
                                     " IAllocator" % key)
4941 d1c2dd75 Iustin Pop
    self._BuildInputData()
4942 d1c2dd75 Iustin Pop
4943 d1c2dd75 Iustin Pop
  def _ComputeClusterData(self):
4944 d1c2dd75 Iustin Pop
    """Compute the generic allocator input data.
4945 d1c2dd75 Iustin Pop

4946 d1c2dd75 Iustin Pop
    This is the data that is independent of the actual operation.
4947 d1c2dd75 Iustin Pop

4948 d1c2dd75 Iustin Pop
    """
4949 d1c2dd75 Iustin Pop
    cfg = self.cfg
4950 d1c2dd75 Iustin Pop
    # cluster data
4951 d1c2dd75 Iustin Pop
    data = {
4952 d1c2dd75 Iustin Pop
      "version": 1,
4953 d1c2dd75 Iustin Pop
      "cluster_name": self.sstore.GetClusterName(),
4954 d1c2dd75 Iustin Pop
      "cluster_tags": list(cfg.GetClusterInfo().GetTags()),
4955 6286519f Iustin Pop
      "hypervisor_type": self.sstore.GetHypervisorType(),
4956 d1c2dd75 Iustin Pop
      # we don't have job IDs
4957 d61df03e Iustin Pop
      }
4958 d61df03e Iustin Pop
4959 6286519f Iustin Pop
    i_list = [cfg.GetInstanceInfo(iname) for iname in cfg.GetInstanceList()]
4960 6286519f Iustin Pop
4961 d1c2dd75 Iustin Pop
    # node data
4962 d1c2dd75 Iustin Pop
    node_results = {}
4963 d1c2dd75 Iustin Pop
    node_list = cfg.GetNodeList()
4964 d1c2dd75 Iustin Pop
    node_data = rpc.call_node_info(node_list, cfg.GetVGName())
4965 d1c2dd75 Iustin Pop
    for nname in node_list:
4966 d1c2dd75 Iustin Pop
      ninfo = cfg.GetNodeInfo(nname)
4967 d1c2dd75 Iustin Pop
      if nname not in node_data or not isinstance(node_data[nname], dict):
4968 d1c2dd75 Iustin Pop
        raise errors.OpExecError("Can't get data for node %s" % nname)
4969 d1c2dd75 Iustin Pop
      remote_info = node_data[nname]
4970 b2662e7f Iustin Pop
      for attr in ['memory_total', 'memory_free', 'memory_dom0',
4971 4337cf1b Iustin Pop
                   'vg_size', 'vg_free', 'cpu_total']:
4972 d1c2dd75 Iustin Pop
        if attr not in remote_info:
4973 d1c2dd75 Iustin Pop
          raise errors.OpExecError("Node '%s' didn't return attribute '%s'" %
4974 d1c2dd75 Iustin Pop
                                   (nname, attr))
4975 d1c2dd75 Iustin Pop
        try:
4976 b2662e7f Iustin Pop
          remote_info[attr] = int(remote_info[attr])
4977 d1c2dd75 Iustin Pop
        except ValueError, err:
4978 d1c2dd75 Iustin Pop
          raise errors.OpExecError("Node '%s' returned invalid value for '%s':"
4979 d1c2dd75 Iustin Pop
                                   " %s" % (nname, attr, str(err)))
4980 6286519f Iustin Pop
      # compute memory used by primary instances
4981 6286519f Iustin Pop
      i_p_mem = i_p_up_mem = 0
4982 6286519f Iustin Pop
      for iinfo in i_list:
4983 6286519f Iustin Pop
        if iinfo.primary_node == nname:
4984 6286519f Iustin Pop
          i_p_mem += iinfo.memory
4985 6286519f Iustin Pop
          if iinfo.status == "up":
4986 6286519f Iustin Pop
            i_p_up_mem += iinfo.memory
4987 6286519f Iustin Pop
4988 b2662e7f Iustin Pop
      # compute memory used by instances
4989 d1c2dd75 Iustin Pop
      pnr = {
4990 d1c2dd75 Iustin Pop
        "tags": list(ninfo.GetTags()),
4991 b2662e7f Iustin Pop
        "total_memory": remote_info['memory_total'],
4992 b2662e7f Iustin Pop
        "reserved_memory": remote_info['memory_dom0'],
4993 b2662e7f Iustin Pop
        "free_memory": remote_info['memory_free'],
4994 6286519f Iustin Pop
        "i_pri_memory": i_p_mem,
4995 6286519f Iustin Pop
        "i_pri_up_memory": i_p_up_mem,
4996 b2662e7f Iustin Pop
        "total_disk": remote_info['vg_size'],
4997 b2662e7f Iustin Pop
        "free_disk": remote_info['vg_free'],
4998 d1c2dd75 Iustin Pop
        "primary_ip": ninfo.primary_ip,
4999 d1c2dd75 Iustin Pop
        "secondary_ip": ninfo.secondary_ip,
5000 4337cf1b Iustin Pop
        "total_cpus": remote_info['cpu_total'],
5001 d1c2dd75 Iustin Pop
        }
5002 d1c2dd75 Iustin Pop
      node_results[nname] = pnr
5003 d1c2dd75 Iustin Pop
    data["nodes"] = node_results
5004 d1c2dd75 Iustin Pop
5005 d1c2dd75 Iustin Pop
    # instance data
5006 d1c2dd75 Iustin Pop
    instance_data = {}
5007 6286519f Iustin Pop
    for iinfo in i_list:
5008 d1c2dd75 Iustin Pop
      nic_data = [{"mac": n.mac, "ip": n.ip, "bridge": n.bridge}
5009 d1c2dd75 Iustin Pop
                  for n in iinfo.nics]
5010 d1c2dd75 Iustin Pop
      pir = {
5011 d1c2dd75 Iustin Pop
        "tags": list(iinfo.GetTags()),
5012 d1c2dd75 Iustin Pop
        "should_run": iinfo.status == "up",
5013 d1c2dd75 Iustin Pop
        "vcpus": iinfo.vcpus,
5014 d1c2dd75 Iustin Pop
        "memory": iinfo.memory,
5015 d1c2dd75 Iustin Pop
        "os": iinfo.os,
5016 d1c2dd75 Iustin Pop
        "nodes": [iinfo.primary_node] + list(iinfo.secondary_nodes),
5017 d1c2dd75 Iustin Pop
        "nics": nic_data,
5018 d1c2dd75 Iustin Pop
        "disks": [{"size": dsk.size, "mode": "w"} for dsk in iinfo.disks],
5019 d1c2dd75 Iustin Pop
        "disk_template": iinfo.disk_template,
5020 d1c2dd75 Iustin Pop
        }
5021 768f0a80 Iustin Pop
      instance_data[iinfo.name] = pir
5022 d61df03e Iustin Pop
5023 d1c2dd75 Iustin Pop
    data["instances"] = instance_data
5024 d61df03e Iustin Pop
5025 d1c2dd75 Iustin Pop
    self.in_data = data
5026 d61df03e Iustin Pop
5027 d1c2dd75 Iustin Pop
  def _AddNewInstance(self):
5028 d1c2dd75 Iustin Pop
    """Add new instance data to allocator structure.
5029 d61df03e Iustin Pop

5030 d1c2dd75 Iustin Pop
    This in combination with _AllocatorGetClusterData will create the
5031 d1c2dd75 Iustin Pop
    correct structure needed as input for the allocator.
5032 d61df03e Iustin Pop

5033 d1c2dd75 Iustin Pop
    The checks for the completeness of the opcode must have already been
5034 d1c2dd75 Iustin Pop
    done.
5035 d61df03e Iustin Pop

5036 d1c2dd75 Iustin Pop
    """
5037 d1c2dd75 Iustin Pop
    data = self.in_data
5038 d1c2dd75 Iustin Pop
    if len(self.disks) != 2:
5039 d1c2dd75 Iustin Pop
      raise errors.OpExecError("Only two-disk configurations supported")
5040 d1c2dd75 Iustin Pop
5041 d1c2dd75 Iustin Pop
    disk_space = _ComputeDiskSize(self.disk_template,
5042 d1c2dd75 Iustin Pop
                                  self.disks[0]["size"], self.disks[1]["size"])
5043 d1c2dd75 Iustin Pop
5044 27579978 Iustin Pop
    if self.disk_template in constants.DTS_NET_MIRROR:
5045 27579978 Iustin Pop
      self.required_nodes = 2
5046 27579978 Iustin Pop
    else:
5047 27579978 Iustin Pop
      self.required_nodes = 1
5048 d1c2dd75 Iustin Pop
    request = {
5049 d1c2dd75 Iustin Pop
      "type": "allocate",
5050 d1c2dd75 Iustin Pop
      "name": self.name,
5051 d1c2dd75 Iustin Pop
      "disk_template": self.disk_template,
5052 d1c2dd75 Iustin Pop
      "tags": self.tags,
5053 d1c2dd75 Iustin Pop
      "os": self.os,
5054 d1c2dd75 Iustin Pop
      "vcpus": self.vcpus,
5055 d1c2dd75 Iustin Pop
      "memory": self.mem_size,
5056 d1c2dd75 Iustin Pop
      "disks": self.disks,
5057 d1c2dd75 Iustin Pop
      "disk_space_total": disk_space,
5058 d1c2dd75 Iustin Pop
      "nics": self.nics,
5059 27579978 Iustin Pop
      "required_nodes": self.required_nodes,
5060 d1c2dd75 Iustin Pop
      }
5061 d1c2dd75 Iustin Pop
    data["request"] = request
5062 298fe380 Iustin Pop
5063 d1c2dd75 Iustin Pop
  def _AddRelocateInstance(self):
    """Fill the allocator input with a relocation request for self.name.

    Together with _IAllocatorGetClusterData this builds the complete
    input structure needed by the allocator; the completeness checks on
    the opcode must already have been performed by the caller.

    """
    inst = self.cfg.GetInstanceInfo(self.name)
    if inst is None:
      raise errors.ProgrammerError("Unknown instance '%s' passed to"
                                   " IAllocator" % self.name)
    if inst.disk_template not in constants.DTS_NET_MIRROR:
      raise errors.OpPrereqError("Can't relocate non-mirrored instances")
    if len(inst.secondary_nodes) != 1:
      raise errors.OpPrereqError("Instance has not exactly one secondary node")

    # relocation keeps the primary, so only one new node is needed
    self.required_nodes = 1

    total_space = _ComputeDiskSize(inst.disk_template,
                                   inst.disks[0].size,
                                   inst.disks[1].size)

    self.in_data["request"] = {
      "type": "relocate",
      "name": self.name,
      "disk_space_total": total_space,
      "required_nodes": self.required_nodes,
      "relocate_from": self.relocate_from,
      }
5098 d61df03e Iustin Pop
5099 d1c2dd75 Iustin Pop
  def _BuildInputData(self):
    """Assemble the full allocator input and serialize it to in_text.

    """
    self._ComputeClusterData()

    # pick the request builder matching the requested mode
    if self.mode == constants.IALLOCATOR_MODE_ALLOC:
      build_request = self._AddNewInstance
    else:
      build_request = self._AddRelocateInstance
    build_request()

    self.in_text = serializer.Dump(self.in_data)
5111 d61df03e Iustin Pop
5112 8d528b7c Iustin Pop
  def Run(self, name, validate=True, call_fn=rpc.call_iallocator_runner):
    """Run an instance allocator and return the results.

    Args:
      name: the name of the allocator script to run on the master node
      validate: whether to run _ValidateResult() on the allocator output
      call_fn: the RPC function used to invoke the runner; overridable
        for testing

    Raises:
      errors.OpExecError: if the RPC result is malformed, the allocator
        script is not found, or the allocator run fails

    """
    result = call_fn(self.sstore.GetMasterNode(), name, self.in_text)

    # the runner must return a (rcode, stdout, stderr, fail) 4-tuple;
    # anything else means the RPC layer misbehaved
    if not isinstance(result, (list, tuple)) or len(result) != 4:
      raise errors.OpExecError("Invalid result from master iallocator runner")

    rcode, stdout, stderr, fail = result

    if rcode == constants.IARUN_NOTFOUND:
      raise errors.OpExecError("Can't find allocator '%s'" % name)
    elif rcode == constants.IARUN_FAILURE:
      raise errors.OpExecError("Instance allocator call failed: %s,"
                               " output: %s" % (fail, stdout+stderr))
    self.out_text = stdout
    if validate:
      self._ValidateResult()
5133 298fe380 Iustin Pop
5134 d1c2dd75 Iustin Pop
  def _ValidateResult(self):
5135 d1c2dd75 Iustin Pop
    """Process the allocator results.
5136 538475ca Iustin Pop

5137 d1c2dd75 Iustin Pop
    This will process and if successful save the result in
5138 d1c2dd75 Iustin Pop
    self.out_data and the other parameters.
5139 538475ca Iustin Pop

5140 d1c2dd75 Iustin Pop
    """
5141 d1c2dd75 Iustin Pop
    try:
5142 d1c2dd75 Iustin Pop
      rdict = serializer.Load(self.out_text)
5143 d1c2dd75 Iustin Pop
    except Exception, err:
5144 d1c2dd75 Iustin Pop
      raise errors.OpExecError("Can't parse iallocator results: %s" % str(err))
5145 d1c2dd75 Iustin Pop
5146 d1c2dd75 Iustin Pop
    if not isinstance(rdict, dict):
5147 d1c2dd75 Iustin Pop
      raise errors.OpExecError("Can't parse iallocator results: not a dict")
5148 538475ca Iustin Pop
5149 d1c2dd75 Iustin Pop
    for key in "success", "info", "nodes":
5150 d1c2dd75 Iustin Pop
      if key not in rdict:
5151 d1c2dd75 Iustin Pop
        raise errors.OpExecError("Can't parse iallocator results:"
5152 d1c2dd75 Iustin Pop
                                 " missing key '%s'" % key)
5153 d1c2dd75 Iustin Pop
      setattr(self, key, rdict[key])
5154 538475ca Iustin Pop
5155 d1c2dd75 Iustin Pop
    if not isinstance(rdict["nodes"], list):
5156 d1c2dd75 Iustin Pop
      raise errors.OpExecError("Can't parse iallocator results: 'nodes' key"
5157 d1c2dd75 Iustin Pop
                               " is not a list")
5158 d1c2dd75 Iustin Pop
    self.out_data = rdict
5159 538475ca Iustin Pop
5160 538475ca Iustin Pop
5161 d61df03e Iustin Pop
class LUTestAllocator(NoHooksLU):
  """Run allocator tests.

  This LU runs the allocator tests

  """
  _OP_REQP = ["direction", "mode", "name"]

  def _CheckAllocParams(self):
    """Validate the opcode parameters for an allocation test."""
    for attr in ["name", "mem_size", "disks", "disk_template",
                 "os", "tags", "nics", "vcpus"]:
      if not hasattr(self.op, attr):
        raise errors.OpPrereqError("Missing attribute '%s' on opcode input" %
                                   attr)
    iname = self.cfg.ExpandInstanceName(self.op.name)
    if iname is not None:
      raise errors.OpPrereqError("Instance '%s' already in the cluster" %
                                 iname)
    if not isinstance(self.op.nics, list):
      raise errors.OpPrereqError("Invalid parameter 'nics'")
    for nic in self.op.nics:
      if (not isinstance(nic, dict) or
          "mac" not in nic or
          "ip" not in nic or
          "bridge" not in nic):
        raise errors.OpPrereqError("Invalid contents of the"
                                   " 'nics' parameter")
    if not isinstance(self.op.disks, list):
      raise errors.OpPrereqError("Invalid parameter 'disks'")
    if len(self.op.disks) != 2:
      raise errors.OpPrereqError("Only two-disk configurations supported")
    for disk in self.op.disks:
      if (not isinstance(disk, dict) or
          "size" not in disk or
          not isinstance(disk["size"], int) or
          "mode" not in disk or
          disk["mode"] not in ['r', 'w']):
        raise errors.OpPrereqError("Invalid contents of the"
                                   " 'disks' parameter")

  def _CheckRelocParams(self):
    """Validate the opcode parameters for a relocation test."""
    if not hasattr(self.op, "name"):
      raise errors.OpPrereqError("Missing attribute 'name' on opcode input")
    fname = self.cfg.ExpandInstanceName(self.op.name)
    if fname is None:
      raise errors.OpPrereqError("Instance '%s' not found for relocation" %
                                 self.op.name)
    # canonicalize the name and remember the current secondaries
    self.op.name = fname
    self.relocate_from = self.cfg.GetInstanceInfo(fname).secondary_nodes

  def CheckPrereq(self):
    """Check prerequisites.

    This checks the opcode parameters depending on the direction and mode test.

    """
    if self.op.mode == constants.IALLOCATOR_MODE_ALLOC:
      self._CheckAllocParams()
    elif self.op.mode == constants.IALLOCATOR_MODE_RELOC:
      self._CheckRelocParams()
    else:
      raise errors.OpPrereqError("Invalid test allocator mode '%s'" %
                                 self.op.mode)

    if self.op.direction == constants.IALLOCATOR_DIR_OUT:
      if not hasattr(self.op, "allocator") or self.op.allocator is None:
        raise errors.OpPrereqError("Missing allocator name")
    elif self.op.direction != constants.IALLOCATOR_DIR_IN:
      raise errors.OpPrereqError("Wrong allocator test '%s'" %
                                 self.op.direction)

  def Exec(self, feedback_fn):
    """Run the allocator test.

    """
    if self.op.mode == constants.IALLOCATOR_MODE_ALLOC:
      ial = IAllocator(self.cfg, self.sstore,
                       mode=self.op.mode,
                       name=self.op.name,
                       mem_size=self.op.mem_size,
                       disks=self.op.disks,
                       disk_template=self.op.disk_template,
                       os=self.op.os,
                       tags=self.op.tags,
                       nics=self.op.nics,
                       vcpus=self.op.vcpus,
                       )
    else:
      ial = IAllocator(self.cfg, self.sstore,
                       mode=self.op.mode,
                       name=self.op.name,
                       relocate_from=list(self.relocate_from),
                       )

    # direction "in" only asks for the generated input; "out" actually
    # runs the allocator and returns its raw output
    if self.op.direction == constants.IALLOCATOR_DIR_IN:
      return ial.in_text
    ial.Run(self.op.allocator, validate=False)
    return ial.out_text