#
#

# Copyright (C) 2006, 2007, 2008 Google Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA.


"""Module implementing the master-side code."""

# pylint: disable-msg=W0613,W0201

import os
import os.path
import sha
import time
import tempfile
import re
import platform

from ganeti import rpc
from ganeti import ssh
from ganeti import logger
from ganeti import utils
from ganeti import errors
from ganeti import hypervisor
from ganeti import locking
from ganeti import config
from ganeti import constants
from ganeti import objects
from ganeti import opcodes
from ganeti import ssconf
from ganeti import serializer


class LogicalUnit(object):
  """Logical Unit base class.

  Subclasses must follow these rules:
    - implement ExpandNames
    - implement CheckPrereq
    - implement Exec
    - implement BuildHooksEnv
    - redefine HPATH and HTYPE
    - optionally redefine their run requirements:
        REQ_MASTER: the LU needs to run on the master node
        REQ_WSSTORE: the LU needs a writable SimpleStore
        REQ_BGL: the LU needs to hold the Big Ganeti Lock exclusively

  Note that all commands require root permissions.

  """
  HPATH = None
  HTYPE = None
  _OP_REQP = []
  REQ_MASTER = True
  REQ_WSSTORE = False
  REQ_BGL = True

  def __init__(self, processor, op, context, sstore):
    """Constructor for LogicalUnit.

    This needs to be overridden in derived classes in order to check op
    validity.

    """
    self.proc = processor
    self.op = op
    self.cfg = context.cfg
    self.sstore = sstore
    self.context = context
    self.needed_locks = None
    self.share_locks = dict(((i, 0) for i in locking.LEVELS))
    # Used to force good behavior when calling helper functions
    self.recalculate_locks = {}
    self.__ssh = None

    for attr_name in self._OP_REQP:
      attr_val = getattr(op, attr_name, None)
      if attr_val is None:
        raise errors.OpPrereqError("Required parameter '%s' missing" %
                                   attr_name)

    if not self.cfg.IsCluster():
      raise errors.OpPrereqError("Cluster not initialized yet,"
                                 " use 'gnt-cluster init' first.")
    if self.REQ_MASTER:
      master = sstore.GetMasterNode()
      if master != utils.HostInfo().name:
        raise errors.OpPrereqError("Commands must be run on the master"
                                   " node %s" % master)

  def __GetSSH(self):
    """Returns the SshRunner object

    """
    if not self.__ssh:
      self.__ssh = ssh.SshRunner(self.sstore)
    return self.__ssh

  ssh = property(fget=__GetSSH)

  def ExpandNames(self):
    """Expand names for this LU.

    This method is called before starting to execute the opcode, and it should
    update all the parameters of the opcode to their canonical form (e.g. a
    short node name must be fully expanded after this method has successfully
    completed). This way locking, hooks, logging, etc. can work correctly.

    LUs which implement this method must also populate the self.needed_locks
    member, as a dict with lock levels as keys, and a list of needed lock names
    as values. Rules:
      - Use an empty dict if you don't need any lock
      - If you don't need any lock at a particular level omit that level
      - Don't put anything for the BGL level
      - If you want all locks at a level use None as a value
        (this reflects what LockSet does, and will be replaced before
        CheckPrereq with the full list of nodes that have been locked)

    If you need to share locks (rather than acquire them exclusively) at one
    level you can modify self.share_locks, setting a true value (usually 1) for
    that level. By default locks are not shared.

    Examples:
    # Acquire all nodes and one instance
    self.needed_locks = {
      locking.LEVEL_NODE: None,
      locking.LEVEL_INSTANCE: ['instance1.example.tld'],
    }
    # Acquire just two nodes
    self.needed_locks = {
      locking.LEVEL_NODE: ['node1.example.tld', 'node2.example.tld'],
    }
    # Acquire no locks
    self.needed_locks = {} # No, you can't leave it to the default value None

    """
    # The implementation of this method is mandatory only if the new LU is
    # concurrent, so that old LUs don't need to be changed all at the same
    # time.
    if self.REQ_BGL:
      self.needed_locks = {} # Exclusive LUs don't need locks.
    else:
      raise NotImplementedError

  def DeclareLocks(self, level):
    """Declare LU locking needs for a level

    While most LUs can just declare their locking needs at ExpandNames time,
    sometimes there's the need to calculate some locks after having acquired
    the ones before. This function is called just before acquiring locks at a
    particular level, but after acquiring the ones at lower levels, and permits
    such calculations. It can be used to modify self.needed_locks, and by
    default it does nothing.

    This function is only called if you have something already set in
    self.needed_locks for the level.

    @param level: Locking level which is going to be locked
    @type level: member of ganeti.locking.LEVELS

    """

  def CheckPrereq(self):
    """Check prerequisites for this LU.

    This method should check that the prerequisites for the execution
    of this LU are fulfilled. It can do internode communication, but
    it should be idempotent - no cluster or system changes are
    allowed.

    The method should raise errors.OpPrereqError in case something is
    not fulfilled. Its return value is ignored.

    This method should also update all the parameters of the opcode to
    their canonical form if it hasn't been done by ExpandNames before.

    """
    raise NotImplementedError

  def Exec(self, feedback_fn):
    """Execute the LU.

    This method should implement the actual work. It should raise
    errors.OpExecError for failures that are somewhat dealt with in
    code, or expected.

    """
    raise NotImplementedError

  def BuildHooksEnv(self):
    """Build hooks environment for this LU.

    This method should return a three-element tuple consisting of: a dict
    containing the environment that will be used for running the
    specific hook for this LU, a list of node names on which the hook
    should run before the execution, and a list of node names on which
    the hook should run after the execution.

    The keys of the dict must not have 'GANETI_' prefixed as this will
    be handled in the hooks runner. Also note additional keys will be
    added by the hooks runner. If the LU doesn't define any
    environment, an empty dict (and not None) should be returned.

    No nodes should be returned as an empty list (and not None).

    Note that if the HPATH for a LU class is None, this function will
    not be called.

    """
    raise NotImplementedError

  def HooksCallBack(self, phase, hook_results, feedback_fn, lu_result):
    """Notify the LU about the results of its hooks.

    This method is called every time a hooks phase is executed, and notifies
    the Logical Unit about the hooks' result. The LU can then use it to alter
    its result based on the hooks.  By default the method does nothing and the
    previous result is passed back unchanged but any LU can define it if it
    wants to use the local cluster hook-scripts somehow.

    Args:
      phase: the hooks phase that has just been run
      hook_results: the results of the multi-node hooks rpc call
      feedback_fn: function to send feedback back to the caller
      lu_result: the previous result this LU had, or None in the PRE phase.

    """
    return lu_result

  def _ExpandAndLockInstance(self):
    """Helper function to expand and lock an instance.

    Many LUs that work on an instance take its name in self.op.instance_name
    and need to expand it and then declare the expanded name for locking. This
    function does it, and then updates self.op.instance_name to the expanded
    name. It also initializes needed_locks as a dict, if this hasn't been done
    before.

    """
    if self.needed_locks is None:
      self.needed_locks = {}
    else:
      assert locking.LEVEL_INSTANCE not in self.needed_locks, \
        "_ExpandAndLockInstance called with instance-level locks set"
    expanded_name = self.cfg.ExpandInstanceName(self.op.instance_name)
    if expanded_name is None:
      raise errors.OpPrereqError("Instance '%s' not known" %
                                  self.op.instance_name)
    self.needed_locks[locking.LEVEL_INSTANCE] = expanded_name
    self.op.instance_name = expanded_name

  def _LockInstancesNodes(self):
    """Helper function to declare instances' nodes for locking.

    This function should be called after locking one or more instances to lock
    their nodes. Its effect is populating self.needed_locks[locking.LEVEL_NODE]
    with all primary or secondary nodes for instances already locked and
    present in self.needed_locks[locking.LEVEL_INSTANCE].

    It should be called from DeclareLocks, and for safety only works if
    self.recalculate_locks[locking.LEVEL_NODE] is set.

    In the future it may grow parameters to just lock some instance's nodes, or
    to just lock primaries or secondary nodes, if needed.

    It should be called in DeclareLocks in a way similar to:

    if level == locking.LEVEL_NODE:
      self._LockInstancesNodes()

    """
    assert locking.LEVEL_NODE in self.recalculate_locks, \
      "_LockInstancesNodes helper function called with no nodes to recalculate"

    # TODO: check if we've really been called with the instance locks held

    # For now we'll replace self.needed_locks[locking.LEVEL_NODE], but in the
    # future we might want to have different behaviors depending on the value
    # of self.recalculate_locks[locking.LEVEL_NODE]
    wanted_nodes = []
    for instance_name in self.needed_locks[locking.LEVEL_INSTANCE]:
      instance = self.context.cfg.GetInstanceInfo(instance_name)
      wanted_nodes.append(instance.primary_node)
      wanted_nodes.extend(instance.secondary_nodes)
    self.needed_locks[locking.LEVEL_NODE] = wanted_nodes

    del self.recalculate_locks[locking.LEVEL_NODE]
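

# Illustrative sketch: a minimal concurrent LU that follows the rules
# documented in LogicalUnit above.  The class name and the opcode field it
# reads (instance_name) are hypothetical and nothing else in this module
# refers to it; a real LU also needs an entry in the opcode dispatch table
# before the master-side processor will run it.
class _LUExampleQueryInstance(LogicalUnit):
  """Example-only LU that inspects one instance without holding the BGL.

  """
  HPATH = None
  HTYPE = None
  _OP_REQP = ["instance_name"]
  REQ_BGL = False

  def ExpandNames(self):
    # Canonicalize self.op.instance_name and declare the instance lock; the
    # node locks are computed later, once the instance lock is already held.
    self._ExpandAndLockInstance()
    self.needed_locks[locking.LEVEL_NODE] = []
    self.recalculate_locks[locking.LEVEL_NODE] = 1

  def DeclareLocks(self, level):
    # Called per level; replace the empty node list declared above with the
    # nodes of the locked instance, as suggested by _LockInstancesNodes.
    if level == locking.LEVEL_NODE:
      self._LockInstancesNodes()

  def CheckPrereq(self):
    # instance_name is already expanded, so a plain config lookup is enough.
    self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)

  def Exec(self, feedback_fn):
    feedback_fn("Instance %s has primary node %s" %
                (self.instance.name, self.instance.primary_node))
    return self.instance.primary_node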


class NoHooksLU(LogicalUnit):
  """Simple LU which runs no hooks.

  This LU is intended as a parent for other LogicalUnits which will
  run no hooks, in order to reduce duplicate code.

  """
  HPATH = None
  HTYPE = None


def _GetWantedNodes(lu, nodes):
  """Returns list of checked and expanded node names.

  Args:
    nodes: List of nodes (strings) or None for all

  """
  if not isinstance(nodes, list):
    raise errors.OpPrereqError("Invalid argument type 'nodes'")

  if nodes:
    wanted = []

    for name in nodes:
      node = lu.cfg.ExpandNodeName(name)
      if node is None:
        raise errors.OpPrereqError("No such node name '%s'" % name)
      wanted.append(node)

  else:
    wanted = lu.cfg.GetNodeList()
  return utils.NiceSort(wanted)


def _GetWantedInstances(lu, instances):
  """Returns list of checked and expanded instance names.

  Args:
    instances: List of instances (strings) or None for all

  """
  if not isinstance(instances, list):
    raise errors.OpPrereqError("Invalid argument type 'instances'")

  if instances:
    wanted = []

    for name in instances:
      instance = lu.cfg.ExpandInstanceName(name)
      if instance is None:
        raise errors.OpPrereqError("No such instance name '%s'" % name)
      wanted.append(instance)

  else:
    wanted = lu.cfg.GetInstanceList()
  return utils.NiceSort(wanted)


def _CheckOutputFields(static, dynamic, selected):
  """Checks whether all selected fields are valid.

  Args:
    static: Static fields
    dynamic: Dynamic fields

  """
  static_fields = frozenset(static)
  dynamic_fields = frozenset(dynamic)

  all_fields = static_fields | dynamic_fields

  if not all_fields.issuperset(selected):
    raise errors.OpPrereqError("Unknown output fields selected: %s"
                               % ",".join(frozenset(selected).
                                          difference(all_fields)))


def _BuildInstanceHookEnv(name, primary_node, secondary_nodes, os_type, status,
                          memory, vcpus, nics):
  """Builds instance related env variables for hooks from single variables.

  Args:
    secondary_nodes: List of secondary nodes as strings
  """
  env = {
    "OP_TARGET": name,
    "INSTANCE_NAME": name,
    "INSTANCE_PRIMARY": primary_node,
    "INSTANCE_SECONDARIES": " ".join(secondary_nodes),
    "INSTANCE_OS_TYPE": os_type,
    "INSTANCE_STATUS": status,
    "INSTANCE_MEMORY": memory,
    "INSTANCE_VCPUS": vcpus,
  }

  if nics:
    nic_count = len(nics)
    for idx, (ip, bridge, mac) in enumerate(nics):
      if ip is None:
        ip = ""
      env["INSTANCE_NIC%d_IP" % idx] = ip
      env["INSTANCE_NIC%d_BRIDGE" % idx] = bridge
      env["INSTANCE_NIC%d_HWADDR" % idx] = mac
  else:
    nic_count = 0

  env["INSTANCE_NIC_COUNT"] = nic_count

  return env


def _BuildInstanceHookEnvByObject(instance, override=None):
  """Builds instance related env variables for hooks from an object.

  Args:
    instance: objects.Instance object of instance
    override: dict of values to override
  """
  args = {
    'name': instance.name,
    'primary_node': instance.primary_node,
    'secondary_nodes': instance.secondary_nodes,
    'os_type': instance.os,
    'status': instance.status,
    'memory': instance.memory,
    'vcpus': instance.vcpus,
    'nics': [(nic.ip, nic.bridge, nic.mac) for nic in instance.nics],
  }
  if override:
    args.update(override)
  return _BuildInstanceHookEnv(**args)
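

# Illustrative sketch: the environment the two helpers above produce for a
# hypothetical one-NIC instance.  Every value is made up for the example and
# the function is not referenced anywhere else in the module.
def _ExampleInstanceHookEnv():
  """Return the hook environment for a hypothetical one-NIC instance.

  """
  # The resulting dict carries OP_TARGET, INSTANCE_NAME, INSTANCE_PRIMARY,
  # INSTANCE_SECONDARIES, INSTANCE_OS_TYPE, INSTANCE_STATUS, INSTANCE_MEMORY,
  # INSTANCE_VCPUS, INSTANCE_NIC_COUNT and INSTANCE_NIC0_{IP,BRIDGE,HWADDR};
  # the hooks runner later adds the GANETI_ prefix to each key.
  return _BuildInstanceHookEnv(name="instance1.example.tld",
                               primary_node="node1.example.tld",
                               secondary_nodes=["node2.example.tld"],
                               os_type="debian-etch",
                               status="up",
                               memory=512,
                               vcpus=1,
                               nics=[("198.51.100.10", "xen-br0",
                                      "aa:00:00:36:33:22")])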


def _CheckInstanceBridgesExist(instance):
  """Check that the bridges needed by an instance exist.

  """
  # check bridges existence
  brlist = [nic.bridge for nic in instance.nics]
  if not rpc.call_bridges_exist(instance.primary_node, brlist):
    raise errors.OpPrereqError("one or more target bridges %s do not"
                               " exist on destination node '%s'" %
                               (brlist, instance.primary_node))


class LUDestroyCluster(NoHooksLU):
  """Logical unit for destroying the cluster.

  """
  _OP_REQP = []

  def CheckPrereq(self):
    """Check prerequisites.

    This checks whether the cluster is empty.

    Any errors are signalled by raising errors.OpPrereqError.

    """
    master = self.sstore.GetMasterNode()

    nodelist = self.cfg.GetNodeList()
    if len(nodelist) != 1 or nodelist[0] != master:
      raise errors.OpPrereqError("There are still %d node(s) in"
                                 " this cluster." % (len(nodelist) - 1))
    instancelist = self.cfg.GetInstanceList()
    if instancelist:
      raise errors.OpPrereqError("There are still %d instance(s) in"
                                 " this cluster." % len(instancelist))

  def Exec(self, feedback_fn):
    """Destroys the cluster.

    """
    master = self.sstore.GetMasterNode()
    if not rpc.call_node_stop_master(master, False):
      raise errors.OpExecError("Could not disable the master role")
    priv_key, pub_key, _ = ssh.GetUserFiles(constants.GANETI_RUNAS)
    utils.CreateBackup(priv_key)
    utils.CreateBackup(pub_key)
    rpc.call_node_leave_cluster(master)


class LUVerifyCluster(LogicalUnit):
  """Verifies the cluster status.

  """
  HPATH = "cluster-verify"
  HTYPE = constants.HTYPE_CLUSTER
  _OP_REQP = ["skip_checks"]

  def _VerifyNode(self, node, file_list, local_cksum, vglist, node_result,
                  remote_version, feedback_fn):
    """Run multiple tests against a node.

    Test list:
      - compares ganeti version
      - checks vg existence and size > 20G
      - checks config file checksum
      - checks ssh to other nodes

    Args:
      node: name of the node to check
      file_list: required list of files
      local_cksum: dictionary of local files and their checksums

    """
    # compares ganeti version
    local_version = constants.PROTOCOL_VERSION
    if not remote_version:
      feedback_fn("  - ERROR: connection to %s failed" % (node))
      return True

    if local_version != remote_version:
      feedback_fn("  - ERROR: sw version mismatch: master %s, node(%s) %s" %
                      (local_version, node, remote_version))
      return True

    # checks vg existence and size > 20G

    bad = False
    if not vglist:
      feedback_fn("  - ERROR: unable to check volume groups on node %s." %
                      (node,))
      bad = True
    else:
      vgstatus = utils.CheckVolumeGroupSize(vglist, self.cfg.GetVGName(),
                                            constants.MIN_VG_SIZE)
      if vgstatus:
        feedback_fn("  - ERROR: %s on node %s" % (vgstatus, node))
        bad = True

    # checks config file checksum
    # checks ssh to any

    if 'filelist' not in node_result:
      bad = True
      feedback_fn("  - ERROR: node hasn't returned file checksum data")
    else:
      remote_cksum = node_result['filelist']
      for file_name in file_list:
        if file_name not in remote_cksum:
          bad = True
          feedback_fn("  - ERROR: file '%s' missing" % file_name)
        elif remote_cksum[file_name] != local_cksum[file_name]:
          bad = True
          feedback_fn("  - ERROR: file '%s' has wrong checksum" % file_name)

    if 'nodelist' not in node_result:
      bad = True
      feedback_fn("  - ERROR: node hasn't returned node ssh connectivity data")
    else:
      if node_result['nodelist']:
        bad = True
        for node in node_result['nodelist']:
          feedback_fn("  - ERROR: ssh communication with node '%s': %s" %
                          (node, node_result['nodelist'][node]))
    if 'node-net-test' not in node_result:
      bad = True
      feedback_fn("  - ERROR: node hasn't returned node tcp connectivity data")
    else:
      if node_result['node-net-test']:
        bad = True
        nlist = utils.NiceSort(node_result['node-net-test'].keys())
        for node in nlist:
          feedback_fn("  - ERROR: tcp communication with node '%s': %s" %
                          (node, node_result['node-net-test'][node]))

    hyp_result = node_result.get('hypervisor', None)
    if hyp_result is not None:
      feedback_fn("  - ERROR: hypervisor verify failure: '%s'" % hyp_result)
    return bad

  def _VerifyInstance(self, instance, instanceconfig, node_vol_is,
                      node_instance, feedback_fn):
    """Verify an instance.

    This function checks to see if the required block devices are
    available on the instance's node.

    """
    bad = False

    node_current = instanceconfig.primary_node

    node_vol_should = {}
    instanceconfig.MapLVsByNode(node_vol_should)

    for node in node_vol_should:
      for volume in node_vol_should[node]:
        if node not in node_vol_is or volume not in node_vol_is[node]:
          feedback_fn("  - ERROR: volume %s missing on node %s" %
                          (volume, node))
          bad = True

    if not instanceconfig.status == 'down':
      if (node_current not in node_instance or
          not instance in node_instance[node_current]):
        feedback_fn("  - ERROR: instance %s not running on node %s" %
                        (instance, node_current))
        bad = True

    for node in node_instance:
      if (not node == node_current):
        if instance in node_instance[node]:
          feedback_fn("  - ERROR: instance %s should not run on node %s" %
                          (instance, node))
          bad = True

    return bad

  def _VerifyOrphanVolumes(self, node_vol_should, node_vol_is, feedback_fn):
    """Verify if there are any unknown volumes in the cluster.

    The .os, .swap and backup volumes are ignored. All other volumes are
    reported as unknown.

    """
    bad = False

    for node in node_vol_is:
      for volume in node_vol_is[node]:
        if node not in node_vol_should or volume not in node_vol_should[node]:
          feedback_fn("  - ERROR: volume %s on node %s should not exist" %
                      (volume, node))
          bad = True
    return bad

  def _VerifyOrphanInstances(self, instancelist, node_instance, feedback_fn):
    """Verify the list of running instances.

    This checks what instances are running but unknown to the cluster.

    """
    bad = False
    for node in node_instance:
      for runninginstance in node_instance[node]:
        if runninginstance not in instancelist:
          feedback_fn("  - ERROR: instance %s on node %s should not exist" %
                          (runninginstance, node))
          bad = True
    return bad

  def _VerifyNPlusOneMemory(self, node_info, instance_cfg, feedback_fn):
    """Verify N+1 Memory Resilience.

    Check that if one single node dies we can still start all the instances it
    was primary for.

    """
    bad = False

    for node, nodeinfo in node_info.iteritems():
      # This code checks that every node which is now listed as secondary has
      # enough memory to host all instances it is supposed to should a single
      # other node in the cluster fail.
      # FIXME: not ready for failover to an arbitrary node
      # FIXME: does not support file-backed instances
      # WARNING: we currently take into account down instances as well as up
      # ones, considering that even if they're down someone might want to start
      # them even in the event of a node failure.
      for prinode, instances in nodeinfo['sinst-by-pnode'].iteritems():
        needed_mem = 0
        for instance in instances:
          needed_mem += instance_cfg[instance].memory
        if nodeinfo['mfree'] < needed_mem:
          feedback_fn("  - ERROR: not enough memory on node %s to accommodate"
                      " failovers should node %s fail" % (node, prinode))
          bad = True
    return bad
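
  # Worked example (hypothetical figures) for the check above: if node2 holds
  # 'sinst-by-pnode': {'node1': ['inst1', 'inst2']} with 'mfree': 1024 while
  # inst1 and inst2 are configured with 768 and 512 MiB of memory, then
  # needed_mem is 1280 > 1024, node2 cannot absorb a failure of node1, and
  # the cluster is flagged as not N+1 compliant for that node pair.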

  def CheckPrereq(self):
    """Check prerequisites.

    Transform the list of checks we're going to skip into a set and check that
    all its members are valid.

    """
    self.skip_set = frozenset(self.op.skip_checks)
    if not constants.VERIFY_OPTIONAL_CHECKS.issuperset(self.skip_set):
      raise errors.OpPrereqError("Invalid checks to be skipped specified")

  def BuildHooksEnv(self):
    """Build hooks env.

    Cluster-Verify hooks just run in the post phase and their failure makes
    the output be logged in the verify output and the verification to fail.

    """
    all_nodes = self.cfg.GetNodeList()
    # TODO: populate the environment with useful information for verify hooks
    env = {}
    return env, [], all_nodes

  def Exec(self, feedback_fn):
    """Verify integrity of cluster, performing various test on nodes.

    """
    bad = False
    feedback_fn("* Verifying global settings")
    for msg in self.cfg.VerifyConfig():
      feedback_fn("  - ERROR: %s" % msg)

    vg_name = self.cfg.GetVGName()
    nodelist = utils.NiceSort(self.cfg.GetNodeList())
    nodeinfo = [self.cfg.GetNodeInfo(nname) for nname in nodelist]
    instancelist = utils.NiceSort(self.cfg.GetInstanceList())
    i_non_redundant = [] # Non redundant instances
    node_volume = {}
    node_instance = {}
    node_info = {}
    instance_cfg = {}

    # FIXME: verify OS list
    # do local checksums
    file_names = list(self.sstore.GetFileList())
    file_names.append(constants.SSL_CERT_FILE)
    file_names.append(constants.CLUSTER_CONF_FILE)
    local_checksums = utils.FingerprintFiles(file_names)

    feedback_fn("* Gathering data (%d nodes)" % len(nodelist))
    all_volumeinfo = rpc.call_volume_list(nodelist, vg_name)
    all_instanceinfo = rpc.call_instance_list(nodelist)
    all_vglist = rpc.call_vg_list(nodelist)
    node_verify_param = {
      'filelist': file_names,
      'nodelist': nodelist,
      'hypervisor': None,
      'node-net-test': [(node.name, node.primary_ip, node.secondary_ip)
                        for node in nodeinfo]
      }
    all_nvinfo = rpc.call_node_verify(nodelist, node_verify_param)
    all_rversion = rpc.call_version(nodelist)
    all_ninfo = rpc.call_node_info(nodelist, self.cfg.GetVGName())

    for node in nodelist:
      feedback_fn("* Verifying node %s" % node)
      result = self._VerifyNode(node, file_names, local_checksums,
                                all_vglist[node], all_nvinfo[node],
                                all_rversion[node], feedback_fn)
      bad = bad or result

      # node_volume
      volumeinfo = all_volumeinfo[node]

      if isinstance(volumeinfo, basestring):
        feedback_fn("  - ERROR: LVM problem on node %s: %s" %
                    (node, volumeinfo[-400:].encode('string_escape')))
        bad = True
        node_volume[node] = {}
      elif not isinstance(volumeinfo, dict):
        feedback_fn("  - ERROR: connection to %s failed" % (node,))
        bad = True
        continue
      else:
        node_volume[node] = volumeinfo

      # node_instance
      nodeinstance = all_instanceinfo[node]
      if type(nodeinstance) != list:
        feedback_fn("  - ERROR: connection to %s failed" % (node,))
        bad = True
        continue

      node_instance[node] = nodeinstance

      # node_info
      nodeinfo = all_ninfo[node]
      if not isinstance(nodeinfo, dict):
        feedback_fn("  - ERROR: connection to %s failed" % (node,))
        bad = True
        continue

      try:
        node_info[node] = {
          "mfree": int(nodeinfo['memory_free']),
          "dfree": int(nodeinfo['vg_free']),
          "pinst": [],
          "sinst": [],
          # dictionary holding all instances this node is secondary for,
          # grouped by their primary node. Each key is a cluster node, and each
          # value is a list of instances which have the key as primary and the
          # current node as secondary.  this is handy to calculate N+1 memory
          # availability if you can only failover from a primary to its
          # secondary.
          "sinst-by-pnode": {},
        }
      except ValueError:
        feedback_fn("  - ERROR: invalid value returned from node %s" % (node,))
        bad = True
        continue

    node_vol_should = {}

    for instance in instancelist:
      feedback_fn("* Verifying instance %s" % instance)
      inst_config = self.cfg.GetInstanceInfo(instance)
      result =  self._VerifyInstance(instance, inst_config, node_volume,
                                     node_instance, feedback_fn)
      bad = bad or result

      inst_config.MapLVsByNode(node_vol_should)

      instance_cfg[instance] = inst_config

      pnode = inst_config.primary_node
      if pnode in node_info:
        node_info[pnode]['pinst'].append(instance)
      else:
        feedback_fn("  - ERROR: instance %s, connection to primary node"
                    " %s failed" % (instance, pnode))
        bad = True

      # If the instance is non-redundant we cannot survive losing its primary
      # node, so we are not N+1 compliant. On the other hand we have no disk
      # templates with more than one secondary so that situation is not well
      # supported either.
      # FIXME: does not support file-backed instances
      if len(inst_config.secondary_nodes) == 0:
        i_non_redundant.append(instance)
      elif len(inst_config.secondary_nodes) > 1:
        feedback_fn("  - WARNING: multiple secondaries for instance %s"
                    % instance)

      for snode in inst_config.secondary_nodes:
        if snode in node_info:
          node_info[snode]['sinst'].append(instance)
          if pnode not in node_info[snode]['sinst-by-pnode']:
            node_info[snode]['sinst-by-pnode'][pnode] = []
          node_info[snode]['sinst-by-pnode'][pnode].append(instance)
        else:
          feedback_fn("  - ERROR: instance %s, connection to secondary node"
                      " %s failed" % (instance, snode))

    feedback_fn("* Verifying orphan volumes")
    result = self._VerifyOrphanVolumes(node_vol_should, node_volume,
                                       feedback_fn)
    bad = bad or result

    feedback_fn("* Verifying remaining instances")
    result = self._VerifyOrphanInstances(instancelist, node_instance,
                                         feedback_fn)
    bad = bad or result

    if constants.VERIFY_NPLUSONE_MEM not in self.skip_set:
      feedback_fn("* Verifying N+1 Memory redundancy")
      result = self._VerifyNPlusOneMemory(node_info, instance_cfg, feedback_fn)
      bad = bad or result

    feedback_fn("* Other Notes")
    if i_non_redundant:
      feedback_fn("  - NOTICE: %d non-redundant instance(s) found."
                  % len(i_non_redundant))

    return int(bad)

  def HooksCallBack(self, phase, hooks_results, feedback_fn, lu_result):
    """Analyze the post-hooks' result, handle it, and send some
    nicely-formatted feedback back to the user.

    Args:
      phase: the hooks phase that has just been run
      hooks_results: the results of the multi-node hooks rpc call
      feedback_fn: function to send feedback back to the caller
      lu_result: previous Exec result

    """
    # We only really run POST phase hooks, and are only interested in their results
    if phase == constants.HOOKS_PHASE_POST:
      # Used to change hooks' output to proper indentation
      indent_re = re.compile('^', re.M)
      feedback_fn("* Hooks Results")
      if not hooks_results:
        feedback_fn("  - ERROR: general communication failure")
        lu_result = 1
      else:
        for node_name in hooks_results:
          show_node_header = True
          res = hooks_results[node_name]
          if res is False or not isinstance(res, list):
            feedback_fn("    Communication failure")
            lu_result = 1
            continue
          for script, hkr, output in res:
            if hkr == constants.HKR_FAIL:
              # The node header is only shown once, if there are
              # failing hooks on that node
              if show_node_header:
                feedback_fn("  Node %s:" % node_name)
                show_node_header = False
              feedback_fn("    ERROR: Script %s failed, output:" % script)
              output = indent_re.sub('      ', output)
              feedback_fn("%s" % output)
              lu_result = 1

      return lu_result


class LUVerifyDisks(NoHooksLU):
  """Verifies the cluster disks status.

  """
  _OP_REQP = []

  def CheckPrereq(self):
    """Check prerequisites.

    This has no prerequisites.

    """
    pass

  def Exec(self, feedback_fn):
    """Verify integrity of cluster disks.

    """
    result = res_nodes, res_nlvm, res_instances, res_missing = [], {}, [], {}

    vg_name = self.cfg.GetVGName()
    nodes = utils.NiceSort(self.cfg.GetNodeList())
    instances = [self.cfg.GetInstanceInfo(name)
                 for name in self.cfg.GetInstanceList()]

    nv_dict = {}
    for inst in instances:
      inst_lvs = {}
      if (inst.status != "up" or
          inst.disk_template not in constants.DTS_NET_MIRROR):
        continue
      inst.MapLVsByNode(inst_lvs)
      # transform { iname: {node: [vol,],},} to {(node, vol): iname}
      for node, vol_list in inst_lvs.iteritems():
        for vol in vol_list:
          nv_dict[(node, vol)] = inst

    if not nv_dict:
      return result

    node_lvs = rpc.call_volume_list(nodes, vg_name)

    to_act = set()
    for node in nodes:
      # node_volume
      lvs = node_lvs[node]

      if isinstance(lvs, basestring):
        logger.Info("error enumerating LVs on node %s: %s" % (node, lvs))
        res_nlvm[node] = lvs
      elif not isinstance(lvs, dict):
        logger.Info("connection to node %s failed or invalid data returned" %
                    (node,))
        res_nodes.append(node)
        continue

      for lv_name, (_, lv_inactive, lv_online) in lvs.iteritems():
        inst = nv_dict.pop((node, lv_name), None)
        if (not lv_online and inst is not None
            and inst.name not in res_instances):
          res_instances.append(inst.name)

    # any leftover items in nv_dict are missing LVs, let's arrange the
    # data better
    for key, inst in nv_dict.iteritems():
      if inst.name not in res_missing:
        res_missing[inst.name] = []
      res_missing[inst.name].append(key)

    return result
974 2c95a8d4 Iustin Pop
975 2c95a8d4 Iustin Pop
976 07bd8a51 Iustin Pop
class LURenameCluster(LogicalUnit):
977 07bd8a51 Iustin Pop
  """Rename the cluster.
978 07bd8a51 Iustin Pop

979 07bd8a51 Iustin Pop
  """
980 07bd8a51 Iustin Pop
  HPATH = "cluster-rename"
981 07bd8a51 Iustin Pop
  HTYPE = constants.HTYPE_CLUSTER
982 07bd8a51 Iustin Pop
  _OP_REQP = ["name"]
983 05f86716 Guido Trotter
  REQ_WSSTORE = True
984 07bd8a51 Iustin Pop
985 07bd8a51 Iustin Pop
  def BuildHooksEnv(self):
986 07bd8a51 Iustin Pop
    """Build hooks env.
987 07bd8a51 Iustin Pop

988 07bd8a51 Iustin Pop
    """
989 07bd8a51 Iustin Pop
    env = {
990 488b540d Iustin Pop
      "OP_TARGET": self.sstore.GetClusterName(),
991 07bd8a51 Iustin Pop
      "NEW_NAME": self.op.name,
992 07bd8a51 Iustin Pop
      }
993 07bd8a51 Iustin Pop
    mn = self.sstore.GetMasterNode()
994 07bd8a51 Iustin Pop
    return env, [mn], [mn]
995 07bd8a51 Iustin Pop
996 07bd8a51 Iustin Pop
  def CheckPrereq(self):
997 07bd8a51 Iustin Pop
    """Verify that the passed name is a valid one.
998 07bd8a51 Iustin Pop

999 07bd8a51 Iustin Pop
    """
1000 89e1fc26 Iustin Pop
    hostname = utils.HostInfo(self.op.name)
1001 07bd8a51 Iustin Pop
1002 bcf043c9 Iustin Pop
    new_name = hostname.name
1003 bcf043c9 Iustin Pop
    self.ip = new_ip = hostname.ip
1004 07bd8a51 Iustin Pop
    old_name = self.sstore.GetClusterName()
1005 07bd8a51 Iustin Pop
    old_ip = self.sstore.GetMasterIP()
1006 07bd8a51 Iustin Pop
    if new_name == old_name and new_ip == old_ip:
1007 07bd8a51 Iustin Pop
      raise errors.OpPrereqError("Neither the name nor the IP address of the"
1008 07bd8a51 Iustin Pop
                                 " cluster has changed")
1009 07bd8a51 Iustin Pop
    if new_ip != old_ip:
1010 937f983d Guido Trotter
      if utils.TcpPing(new_ip, constants.DEFAULT_NODED_PORT):
1011 07bd8a51 Iustin Pop
        raise errors.OpPrereqError("The given cluster IP address (%s) is"
1012 07bd8a51 Iustin Pop
                                   " reachable on the network. Aborting." %
1013 07bd8a51 Iustin Pop
                                   new_ip)
1014 07bd8a51 Iustin Pop
1015 07bd8a51 Iustin Pop
    self.op.name = new_name
1016 07bd8a51 Iustin Pop
1017 07bd8a51 Iustin Pop
  def Exec(self, feedback_fn):
1018 07bd8a51 Iustin Pop
    """Rename the cluster.
1019 07bd8a51 Iustin Pop

1020 07bd8a51 Iustin Pop
    """
1021 07bd8a51 Iustin Pop
    clustername = self.op.name
1022 07bd8a51 Iustin Pop
    ip = self.ip
1023 07bd8a51 Iustin Pop
    ss = self.sstore
1024 07bd8a51 Iustin Pop
1025 07bd8a51 Iustin Pop
    # shutdown the master IP
1026 07bd8a51 Iustin Pop
    master = ss.GetMasterNode()
1027 1c65840b Iustin Pop
    if not rpc.call_node_stop_master(master, False):
1028 07bd8a51 Iustin Pop
      raise errors.OpExecError("Could not disable the master role")
1029 07bd8a51 Iustin Pop
1030 07bd8a51 Iustin Pop
    try:
1031 07bd8a51 Iustin Pop
      # modify the sstore
1032 07bd8a51 Iustin Pop
      ss.SetKey(ss.SS_MASTER_IP, ip)
1033 07bd8a51 Iustin Pop
      ss.SetKey(ss.SS_CLUSTER_NAME, clustername)
1034 07bd8a51 Iustin Pop
1035 07bd8a51 Iustin Pop
      # Distribute updated ss config to all nodes
1036 07bd8a51 Iustin Pop
      myself = self.cfg.GetNodeInfo(master)
1037 07bd8a51 Iustin Pop
      dist_nodes = self.cfg.GetNodeList()
1038 07bd8a51 Iustin Pop
      if myself.name in dist_nodes:
1039 07bd8a51 Iustin Pop
        dist_nodes.remove(myself.name)
1040 07bd8a51 Iustin Pop
1041 07bd8a51 Iustin Pop
      logger.Debug("Copying updated ssconf data to all nodes")
1042 07bd8a51 Iustin Pop
      for keyname in [ss.SS_CLUSTER_NAME, ss.SS_MASTER_IP]:
1043 07bd8a51 Iustin Pop
        fname = ss.KeyToFilename(keyname)
1044 07bd8a51 Iustin Pop
        result = rpc.call_upload_file(dist_nodes, fname)
1045 07bd8a51 Iustin Pop
        for to_node in dist_nodes:
1046 07bd8a51 Iustin Pop
          if not result[to_node]:
1047 07bd8a51 Iustin Pop
            logger.Error("copy of file %s to node %s failed" %
1048 07bd8a51 Iustin Pop
                         (fname, to_node))
1049 07bd8a51 Iustin Pop
    finally:
1050 1c65840b Iustin Pop
      if not rpc.call_node_start_master(master, False):
1051 f4bc1f2c Michael Hanselmann
        logger.Error("Could not re-enable the master role on the master,"
1052 f4bc1f2c Michael Hanselmann
                     " please restart manually.")
1053 07bd8a51 Iustin Pop
1054 07bd8a51 Iustin Pop
1055 8084f9f6 Manuel Franceschini
def _RecursiveCheckIfLVMBased(disk):
1056 8084f9f6 Manuel Franceschini
  """Check if the given disk or its children are lvm-based.
1057 8084f9f6 Manuel Franceschini

1058 8084f9f6 Manuel Franceschini
  Args:
1059 8084f9f6 Manuel Franceschini
    disk: ganeti.objects.Disk object
1060 8084f9f6 Manuel Franceschini

1061 8084f9f6 Manuel Franceschini
  Returns:
1062 8084f9f6 Manuel Franceschini
    boolean indicating whether an LD_LV dev_type was found or not
1063 8084f9f6 Manuel Franceschini

1064 8084f9f6 Manuel Franceschini
  """
1065 8084f9f6 Manuel Franceschini
  if disk.children:
1066 8084f9f6 Manuel Franceschini
    for chdisk in disk.children:
1067 8084f9f6 Manuel Franceschini
      if _RecursiveCheckIfLVMBased(chdisk):
1068 8084f9f6 Manuel Franceschini
        return True
1069 8084f9f6 Manuel Franceschini
  return disk.dev_type == constants.LD_LV
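# Illustrative sketch only (the disk objects below are made up): a DRBD8
# device backed by LV children is reported as lvm-based because the check
# recurses into the children before looking at the device itself.
#
#   lv = objects.Disk(dev_type=constants.LD_LV, size=1024,
#                     logical_id=("xenvg", "example-data"))
#   drbd = objects.Disk(dev_type=constants.LD_DRBD8, size=1024,
#                       children=[lv, lv])
#   _RecursiveCheckIfLVMBased(drbd)   # True
#   _RecursiveCheckIfLVMBased(lv)     # True, it is itself an LD_LV device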
1070 8084f9f6 Manuel Franceschini
1071 8084f9f6 Manuel Franceschini
1072 8084f9f6 Manuel Franceschini
class LUSetClusterParams(LogicalUnit):
1073 8084f9f6 Manuel Franceschini
  """Change the parameters of the cluster.
1074 8084f9f6 Manuel Franceschini

1075 8084f9f6 Manuel Franceschini
  """
1076 8084f9f6 Manuel Franceschini
  HPATH = "cluster-modify"
1077 8084f9f6 Manuel Franceschini
  HTYPE = constants.HTYPE_CLUSTER
1078 8084f9f6 Manuel Franceschini
  _OP_REQP = []
1079 8084f9f6 Manuel Franceschini
1080 8084f9f6 Manuel Franceschini
  def BuildHooksEnv(self):
1081 8084f9f6 Manuel Franceschini
    """Build hooks env.
1082 8084f9f6 Manuel Franceschini

1083 8084f9f6 Manuel Franceschini
    """
1084 8084f9f6 Manuel Franceschini
    env = {
1085 8084f9f6 Manuel Franceschini
      "OP_TARGET": self.sstore.GetClusterName(),
1086 8084f9f6 Manuel Franceschini
      "NEW_VG_NAME": self.op.vg_name,
1087 8084f9f6 Manuel Franceschini
      }
1088 8084f9f6 Manuel Franceschini
    mn = self.sstore.GetMasterNode()
1089 8084f9f6 Manuel Franceschini
    return env, [mn], [mn]
1090 8084f9f6 Manuel Franceschini
1091 8084f9f6 Manuel Franceschini
  def CheckPrereq(self):
1092 8084f9f6 Manuel Franceschini
    """Check prerequisites.
1093 8084f9f6 Manuel Franceschini

1094 8084f9f6 Manuel Franceschini
    This checks whether the given params don't conflict and
1095 5f83e263 Iustin Pop
    if the given volume group is valid.
1096 8084f9f6 Manuel Franceschini

1097 8084f9f6 Manuel Franceschini
    """
1098 8084f9f6 Manuel Franceschini
    if not self.op.vg_name:
1099 8084f9f6 Manuel Franceschini
      instances = [self.cfg.GetInstanceInfo(name)
1100 8084f9f6 Manuel Franceschini
                   for name in self.cfg.GetInstanceList()]
1101 8084f9f6 Manuel Franceschini
      for inst in instances:
1102 8084f9f6 Manuel Franceschini
        for disk in inst.disks:
1103 8084f9f6 Manuel Franceschini
          if _RecursiveCheckIfLVMBased(disk):
1104 8084f9f6 Manuel Franceschini
            raise errors.OpPrereqError("Cannot disable lvm storage while"
1105 8084f9f6 Manuel Franceschini
                                       " lvm-based instances exist")
1106 8084f9f6 Manuel Franceschini
1107 8084f9f6 Manuel Franceschini
    # if vg_name not None, checks given volume group on all nodes
1108 8084f9f6 Manuel Franceschini
    if self.op.vg_name:
1109 8084f9f6 Manuel Franceschini
      node_list = self.cfg.GetNodeList()
1110 8084f9f6 Manuel Franceschini
      vglist = rpc.call_vg_list(node_list)
1111 8084f9f6 Manuel Franceschini
      for node in node_list:
1112 8d1a2a64 Michael Hanselmann
        vgstatus = utils.CheckVolumeGroupSize(vglist[node], self.op.vg_name,
1113 8d1a2a64 Michael Hanselmann
                                              constants.MIN_VG_SIZE)
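        # CheckVolumeGroupSize returns an error message when the volume
        # group is missing or smaller than MIN_VG_SIZE, and a false value
        # when the group passes the check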
1114 8084f9f6 Manuel Franceschini
        if vgstatus:
1115 8084f9f6 Manuel Franceschini
          raise errors.OpPrereqError("Error on node '%s': %s" %
1116 8084f9f6 Manuel Franceschini
                                     (node, vgstatus))
1117 8084f9f6 Manuel Franceschini
1118 8084f9f6 Manuel Franceschini
  def Exec(self, feedback_fn):
1119 8084f9f6 Manuel Franceschini
    """Change the parameters of the cluster.
1120 8084f9f6 Manuel Franceschini

1121 8084f9f6 Manuel Franceschini
    """
1122 8084f9f6 Manuel Franceschini
    if self.op.vg_name != self.cfg.GetVGName():
1123 8084f9f6 Manuel Franceschini
      self.cfg.SetVGName(self.op.vg_name)
1124 8084f9f6 Manuel Franceschini
    else:
1125 8084f9f6 Manuel Franceschini
      feedback_fn("Cluster LVM configuration already in desired"
1126 8084f9f6 Manuel Franceschini
                  " state, not changing")
1127 8084f9f6 Manuel Franceschini
1128 8084f9f6 Manuel Franceschini
1129 5bfac263 Iustin Pop
def _WaitForSync(cfgw, instance, proc, oneshot=False, unlock=False):
1130 a8083063 Iustin Pop
  """Sleep and poll for an instance's disk to sync.
1131 a8083063 Iustin Pop

1132 a8083063 Iustin Pop
  """
1133 a8083063 Iustin Pop
  if not instance.disks:
1134 a8083063 Iustin Pop
    return True
1135 a8083063 Iustin Pop
1136 a8083063 Iustin Pop
  if not oneshot:
1137 5bfac263 Iustin Pop
    proc.LogInfo("Waiting for instance %s to sync disks." % instance.name)
1138 a8083063 Iustin Pop
1139 a8083063 Iustin Pop
  node = instance.primary_node
1140 a8083063 Iustin Pop
1141 a8083063 Iustin Pop
  for dev in instance.disks:
1142 a8083063 Iustin Pop
    cfgw.SetDiskID(dev, node)
1143 a8083063 Iustin Pop
1144 a8083063 Iustin Pop
  retries = 0
1145 a8083063 Iustin Pop
  while True:
1146 a8083063 Iustin Pop
    max_time = 0
1147 a8083063 Iustin Pop
    done = True
1148 a8083063 Iustin Pop
    cumul_degraded = False
1149 a8083063 Iustin Pop
    rstats = rpc.call_blockdev_getmirrorstatus(node, instance.disks)
1150 a8083063 Iustin Pop
    if not rstats:
1151 5bfac263 Iustin Pop
      proc.LogWarning("Can't get any data from node %s" % node)
1152 a8083063 Iustin Pop
      retries += 1
1153 a8083063 Iustin Pop
      if retries >= 10:
1154 3ecf6786 Iustin Pop
        raise errors.RemoteError("Can't contact node %s for mirror data,"
1155 3ecf6786 Iustin Pop
                                 " aborting." % node)
1156 a8083063 Iustin Pop
      time.sleep(6)
1157 a8083063 Iustin Pop
      continue
1158 a8083063 Iustin Pop
    retries = 0
1159 a8083063 Iustin Pop
    for i in range(len(rstats)):
1160 a8083063 Iustin Pop
      mstat = rstats[i]
1161 a8083063 Iustin Pop
      if mstat is None:
1162 5bfac263 Iustin Pop
        proc.LogWarning("Can't compute data for node %s/%s" %
1163 a8083063 Iustin Pop
                        (node, instance.disks[i].iv_name))
1164 a8083063 Iustin Pop
        continue
1165 0834c866 Iustin Pop
      # we ignore the ldisk parameter
1166 0834c866 Iustin Pop
      perc_done, est_time, is_degraded, _ = mstat
1167 a8083063 Iustin Pop
      cumul_degraded = cumul_degraded or (is_degraded and perc_done is None)
1168 a8083063 Iustin Pop
      if perc_done is not None:
1169 a8083063 Iustin Pop
        done = False
1170 a8083063 Iustin Pop
        if est_time is not None:
1171 a8083063 Iustin Pop
          rem_time = "%d estimated seconds remaining" % est_time
1172 a8083063 Iustin Pop
          max_time = est_time
1173 a8083063 Iustin Pop
        else:
1174 a8083063 Iustin Pop
          rem_time = "no time estimate"
1175 5bfac263 Iustin Pop
        proc.LogInfo("- device %s: %5.2f%% done, %s" %
1176 5bfac263 Iustin Pop
                     (instance.disks[i].iv_name, perc_done, rem_time))
1177 a8083063 Iustin Pop
    if done or oneshot:
1178 a8083063 Iustin Pop
      break
1179 a8083063 Iustin Pop
1180 d4fa5c23 Iustin Pop
    time.sleep(min(60, max_time))
1181 a8083063 Iustin Pop
1182 a8083063 Iustin Pop
  if done:
1183 5bfac263 Iustin Pop
    proc.LogInfo("Instance %s's disks are in sync." % instance.name)
1184 a8083063 Iustin Pop
  return not cumul_degraded
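# Sketch of how the instance LUs further down typically use this helper
# (illustrative only, the real call sites and messages differ slightly):
#
#   if not _WaitForSync(self.cfg, instance, self.proc):
#     raise errors.OpExecError("disks are degraded, aborting")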
1185 a8083063 Iustin Pop
1186 a8083063 Iustin Pop
1187 0834c866 Iustin Pop
def _CheckDiskConsistency(cfgw, dev, node, on_primary, ldisk=False):
1188 a8083063 Iustin Pop
  """Check that mirrors are not degraded.
1189 a8083063 Iustin Pop

1190 0834c866 Iustin Pop
  The ldisk parameter, if True, will change the test from the
1191 0834c866 Iustin Pop
  is_degraded attribute (which represents overall non-ok status for
1192 0834c866 Iustin Pop
  the device(s)) to the ldisk (representing the local storage status).
1193 0834c866 Iustin Pop

1194 a8083063 Iustin Pop
  """
1195 a8083063 Iustin Pop
  cfgw.SetDiskID(dev, node)
1196 0834c866 Iustin Pop
  if ldisk:
1197 0834c866 Iustin Pop
    idx = 6
1198 0834c866 Iustin Pop
  else:
1199 0834c866 Iustin Pop
    idx = 5
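  # positions 5 and 6 of the tuple returned by rpc.call_blockdev_find are
  # the overall is_degraded flag and the ldisk (local storage) status
  # respectively, which is what the docstring above refers to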
1200 a8083063 Iustin Pop
1201 a8083063 Iustin Pop
  result = True
1202 a8083063 Iustin Pop
  if on_primary or dev.AssembleOnSecondary():
1203 a8083063 Iustin Pop
    rstats = rpc.call_blockdev_find(node, dev)
1204 a8083063 Iustin Pop
    if not rstats:
1205 aa9d0c32 Guido Trotter
      logger.ToStderr("Node %s: Disk degraded, not found or node down" % node)
1206 a8083063 Iustin Pop
      result = False
1207 a8083063 Iustin Pop
    else:
1208 0834c866 Iustin Pop
      result = result and (not rstats[idx])
1209 a8083063 Iustin Pop
  if dev.children:
1210 a8083063 Iustin Pop
    for child in dev.children:
1211 a8083063 Iustin Pop
      result = result and _CheckDiskConsistency(cfgw, child, node, on_primary,
                                                 ldisk=ldisk)
1212 a8083063 Iustin Pop
1213 a8083063 Iustin Pop
  return result
1214 a8083063 Iustin Pop
1215 a8083063 Iustin Pop
1216 a8083063 Iustin Pop
class LUDiagnoseOS(NoHooksLU):
1217 a8083063 Iustin Pop
  """Logical unit for OS diagnose/query.
1218 a8083063 Iustin Pop

1219 a8083063 Iustin Pop
  """
1220 1f9430d6 Iustin Pop
  _OP_REQP = ["output_fields", "names"]
1221 a8083063 Iustin Pop
1222 a8083063 Iustin Pop
  def CheckPrereq(self):
1223 a8083063 Iustin Pop
    """Check prerequisites.
1224 a8083063 Iustin Pop

1225 a8083063 Iustin Pop
    This always succeeds, since this is a pure query LU.
1226 a8083063 Iustin Pop

1227 a8083063 Iustin Pop
    """
1228 1f9430d6 Iustin Pop
    if self.op.names:
1229 1f9430d6 Iustin Pop
      raise errors.OpPrereqError("Selective OS query not supported")
1230 1f9430d6 Iustin Pop
1231 1f9430d6 Iustin Pop
    self.dynamic_fields = frozenset(["name", "valid", "node_status"])
1232 1f9430d6 Iustin Pop
    _CheckOutputFields(static=[],
1233 1f9430d6 Iustin Pop
                       dynamic=self.dynamic_fields,
1234 1f9430d6 Iustin Pop
                       selected=self.op.output_fields)
1235 1f9430d6 Iustin Pop
1236 1f9430d6 Iustin Pop
  @staticmethod
1237 1f9430d6 Iustin Pop
  def _DiagnoseByOS(node_list, rlist):
1238 1f9430d6 Iustin Pop
    """Remaps a per-node return list into an a per-os per-node dictionary
1239 1f9430d6 Iustin Pop

1240 1f9430d6 Iustin Pop
      Args:
1241 1f9430d6 Iustin Pop
        node_list: a list with the names of all nodes
1242 1f9430d6 Iustin Pop
        rlist: a map with node names as keys and OS objects as values
1243 1f9430d6 Iustin Pop

1244 1f9430d6 Iustin Pop
      Returns:
1245 1f9430d6 Iustin Pop
        map: a map with osnames as keys and as value another
1246 1f9430d6 Iustin Pop
             map, with nodes as keys and lists of OS objects
1247 1f9430d6 Iustin Pop
             as values,
1248 1f9430d6 Iustin Pop
             e.g. {"debian-etch": {"node1": [<object>,...],
1249 1f9430d6 Iustin Pop
                                   "node2": [<object>,]}
1250 1f9430d6 Iustin Pop
                  }
1251 1f9430d6 Iustin Pop

1252 1f9430d6 Iustin Pop
    """
1253 1f9430d6 Iustin Pop
    all_os = {}
1254 1f9430d6 Iustin Pop
    for node_name, nr in rlist.iteritems():
1255 1f9430d6 Iustin Pop
      if not nr:
1256 1f9430d6 Iustin Pop
        continue
1257 b4de68a9 Iustin Pop
      for os_obj in nr:
1258 b4de68a9 Iustin Pop
        if os_obj.name not in all_os:
1259 1f9430d6 Iustin Pop
          # build a list of nodes for this os containing empty lists
1260 1f9430d6 Iustin Pop
          # for each node in node_list
1261 b4de68a9 Iustin Pop
          all_os[os_obj.name] = {}
1262 1f9430d6 Iustin Pop
          for nname in node_list:
1263 b4de68a9 Iustin Pop
            all_os[os_obj.name][nname] = []
1264 b4de68a9 Iustin Pop
        all_os[os_obj.name][node_name].append(os_obj)
1265 1f9430d6 Iustin Pop
    return all_os
1266 a8083063 Iustin Pop
1267 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
1268 a8083063 Iustin Pop
    """Compute the list of OSes.
1269 a8083063 Iustin Pop

1270 a8083063 Iustin Pop
    """
1271 a8083063 Iustin Pop
    node_list = self.cfg.GetNodeList()
1272 a8083063 Iustin Pop
    node_data = rpc.call_os_diagnose(node_list)
1273 a8083063 Iustin Pop
    if node_data == False:
1274 3ecf6786 Iustin Pop
      raise errors.OpExecError("Can't gather the list of OSes")
1275 1f9430d6 Iustin Pop
    pol = self._DiagnoseByOS(node_list, node_data)
1276 1f9430d6 Iustin Pop
    output = []
1277 1f9430d6 Iustin Pop
    for os_name, os_data in pol.iteritems():
1278 1f9430d6 Iustin Pop
      row = []
1279 1f9430d6 Iustin Pop
      for field in self.op.output_fields:
1280 1f9430d6 Iustin Pop
        if field == "name":
1281 1f9430d6 Iustin Pop
          val = os_name
1282 1f9430d6 Iustin Pop
        elif field == "valid":
1283 1f9430d6 Iustin Pop
          val = utils.all([osl and osl[0] for osl in os_data.values()])
1284 1f9430d6 Iustin Pop
        elif field == "node_status":
1285 1f9430d6 Iustin Pop
          val = {}
1286 1f9430d6 Iustin Pop
          for node_name, nos_list in os_data.iteritems():
1287 1f9430d6 Iustin Pop
            val[node_name] = [(v.status, v.path) for v in nos_list]
1288 1f9430d6 Iustin Pop
        else:
1289 1f9430d6 Iustin Pop
          raise errors.ParameterError(field)
1290 1f9430d6 Iustin Pop
        row.append(val)
1291 1f9430d6 Iustin Pop
      output.append(row)
1292 1f9430d6 Iustin Pop
1293 1f9430d6 Iustin Pop
    return output
1294 a8083063 Iustin Pop
1295 a8083063 Iustin Pop
1296 a8083063 Iustin Pop
class LURemoveNode(LogicalUnit):
1297 a8083063 Iustin Pop
  """Logical unit for removing a node.
1298 a8083063 Iustin Pop

1299 a8083063 Iustin Pop
  """
1300 a8083063 Iustin Pop
  HPATH = "node-remove"
1301 a8083063 Iustin Pop
  HTYPE = constants.HTYPE_NODE
1302 a8083063 Iustin Pop
  _OP_REQP = ["node_name"]
1303 a8083063 Iustin Pop
1304 a8083063 Iustin Pop
  def BuildHooksEnv(self):
1305 a8083063 Iustin Pop
    """Build hooks env.
1306 a8083063 Iustin Pop

1307 a8083063 Iustin Pop
    This doesn't run on the target node in the pre phase as a failed
1308 d08869ee Guido Trotter
    node would then be impossible to remove.
1309 a8083063 Iustin Pop

1310 a8083063 Iustin Pop
    """
1311 396e1b78 Michael Hanselmann
    env = {
1312 0e137c28 Iustin Pop
      "OP_TARGET": self.op.node_name,
1313 396e1b78 Michael Hanselmann
      "NODE_NAME": self.op.node_name,
1314 396e1b78 Michael Hanselmann
      }
1315 a8083063 Iustin Pop
    all_nodes = self.cfg.GetNodeList()
1316 a8083063 Iustin Pop
    all_nodes.remove(self.op.node_name)
1317 396e1b78 Michael Hanselmann
    return env, all_nodes, all_nodes
1318 a8083063 Iustin Pop
1319 a8083063 Iustin Pop
  def CheckPrereq(self):
1320 a8083063 Iustin Pop
    """Check prerequisites.
1321 a8083063 Iustin Pop

1322 a8083063 Iustin Pop
    This checks:
1323 a8083063 Iustin Pop
     - the node exists in the configuration
1324 a8083063 Iustin Pop
     - it does not have primary or secondary instances
1325 a8083063 Iustin Pop
     - it's not the master
1326 a8083063 Iustin Pop

1327 a8083063 Iustin Pop
    Any errors are signalled by raising errors.OpPrereqError.
1328 a8083063 Iustin Pop

1329 a8083063 Iustin Pop
    """
1330 a8083063 Iustin Pop
    node = self.cfg.GetNodeInfo(self.cfg.ExpandNodeName(self.op.node_name))
1331 a8083063 Iustin Pop
    if node is None:
1332 a02bc76e Iustin Pop
      raise errors.OpPrereqError("Node '%s' is unknown." % self.op.node_name)
1333 a8083063 Iustin Pop
1334 a8083063 Iustin Pop
    instance_list = self.cfg.GetInstanceList()
1335 a8083063 Iustin Pop
1336 880478f8 Iustin Pop
    masternode = self.sstore.GetMasterNode()
1337 a8083063 Iustin Pop
    if node.name == masternode:
1338 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("Node is the master node,"
1339 3ecf6786 Iustin Pop
                                 " you need to failover first.")
1340 a8083063 Iustin Pop
1341 a8083063 Iustin Pop
    for instance_name in instance_list:
1342 a8083063 Iustin Pop
      instance = self.cfg.GetInstanceInfo(instance_name)
1343 a8083063 Iustin Pop
      if node.name == instance.primary_node:
1344 3ecf6786 Iustin Pop
        raise errors.OpPrereqError("Instance %s still running on the node,"
1345 3ecf6786 Iustin Pop
                                   " please remove first." % instance_name)
1346 a8083063 Iustin Pop
      if node.name in instance.secondary_nodes:
1347 3ecf6786 Iustin Pop
        raise errors.OpPrereqError("Instance %s has node as a secondary,"
1348 3ecf6786 Iustin Pop
                                   " please remove first." % instance_name)
1349 a8083063 Iustin Pop
    self.op.node_name = node.name
1350 a8083063 Iustin Pop
    self.node = node
1351 a8083063 Iustin Pop
1352 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
1353 a8083063 Iustin Pop
    """Removes the node from the cluster.
1354 a8083063 Iustin Pop

1355 a8083063 Iustin Pop
    """
1356 a8083063 Iustin Pop
    node = self.node
1357 a8083063 Iustin Pop
    logger.Info("stopping the node daemon and removing configs from node %s" %
1358 a8083063 Iustin Pop
                node.name)
1359 a8083063 Iustin Pop
1360 a8083063 Iustin Pop
    rpc.call_node_leave_cluster(node.name)
1361 a8083063 Iustin Pop
1362 a8083063 Iustin Pop
    logger.Info("Removing node %s from config" % node.name)
1363 a8083063 Iustin Pop
1364 a8083063 Iustin Pop
    self.cfg.RemoveNode(node.name)
1365 a2fd9afc Guido Trotter
    # Remove the node from the Ganeti Lock Manager
1366 a2fd9afc Guido Trotter
    self.context.glm.remove(locking.LEVEL_NODE, node.name)
1367 a8083063 Iustin Pop
1368 d9c02ca6 Michael Hanselmann
    utils.RemoveHostFromEtcHosts(node.name)
1369 c8a0948f Michael Hanselmann
1370 a8083063 Iustin Pop
1371 a8083063 Iustin Pop
class LUQueryNodes(NoHooksLU):
1372 a8083063 Iustin Pop
  """Logical unit for querying nodes.
1373 a8083063 Iustin Pop

1374 a8083063 Iustin Pop
  """
1375 246e180a Iustin Pop
  _OP_REQP = ["output_fields", "names"]
1376 a8083063 Iustin Pop
1377 a8083063 Iustin Pop
  def CheckPrereq(self):
1378 a8083063 Iustin Pop
    """Check prerequisites.
1379 a8083063 Iustin Pop

1380 a8083063 Iustin Pop
    This checks that the fields required are valid output fields.
1381 a8083063 Iustin Pop

1382 a8083063 Iustin Pop
    """
1383 e8a4c138 Iustin Pop
    self.dynamic_fields = frozenset([
1384 e8a4c138 Iustin Pop
      "dtotal", "dfree",
1385 e8a4c138 Iustin Pop
      "mtotal", "mnode", "mfree",
1386 e8a4c138 Iustin Pop
      "bootid",
1387 e8a4c138 Iustin Pop
      "ctotal",
1388 e8a4c138 Iustin Pop
      ])
1389 a8083063 Iustin Pop
1390 ec223efb Iustin Pop
    _CheckOutputFields(static=["name", "pinst_cnt", "sinst_cnt",
1391 ec223efb Iustin Pop
                               "pinst_list", "sinst_list",
1392 130a6a6f Iustin Pop
                               "pip", "sip", "tags"],
1393 dcb93971 Michael Hanselmann
                       dynamic=self.dynamic_fields,
1394 dcb93971 Michael Hanselmann
                       selected=self.op.output_fields)
1395 a8083063 Iustin Pop
1396 246e180a Iustin Pop
    self.wanted = _GetWantedNodes(self, self.op.names)
1397 a8083063 Iustin Pop
1398 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
1399 a8083063 Iustin Pop
    """Computes the list of nodes and their attributes.
1400 a8083063 Iustin Pop

1401 a8083063 Iustin Pop
    """
1402 246e180a Iustin Pop
    nodenames = self.wanted
1403 a8083063 Iustin Pop
    nodelist = [self.cfg.GetNodeInfo(name) for name in nodenames]
1404 a8083063 Iustin Pop
1405 a8083063 Iustin Pop
    # begin data gathering
1406 a8083063 Iustin Pop
1407 a8083063 Iustin Pop
    if self.dynamic_fields.intersection(self.op.output_fields):
1408 a8083063 Iustin Pop
      live_data = {}
1409 a8083063 Iustin Pop
      node_data = rpc.call_node_info(nodenames, self.cfg.GetVGName())
1410 a8083063 Iustin Pop
      for name in nodenames:
1411 a8083063 Iustin Pop
        nodeinfo = node_data.get(name, None)
1412 a8083063 Iustin Pop
        if nodeinfo:
1413 a8083063 Iustin Pop
          live_data[name] = {
1414 a8083063 Iustin Pop
            "mtotal": utils.TryConvert(int, nodeinfo['memory_total']),
1415 a8083063 Iustin Pop
            "mnode": utils.TryConvert(int, nodeinfo['memory_dom0']),
1416 a8083063 Iustin Pop
            "mfree": utils.TryConvert(int, nodeinfo['memory_free']),
1417 a8083063 Iustin Pop
            "dtotal": utils.TryConvert(int, nodeinfo['vg_size']),
1418 a8083063 Iustin Pop
            "dfree": utils.TryConvert(int, nodeinfo['vg_free']),
1419 e8a4c138 Iustin Pop
            "ctotal": utils.TryConvert(int, nodeinfo['cpu_total']),
1420 3ef10550 Michael Hanselmann
            "bootid": nodeinfo['bootid'],
1421 a8083063 Iustin Pop
            }
1422 a8083063 Iustin Pop
        else:
1423 a8083063 Iustin Pop
          live_data[name] = {}
1424 a8083063 Iustin Pop
    else:
1425 a8083063 Iustin Pop
      live_data = dict.fromkeys(nodenames, {})
1426 a8083063 Iustin Pop
1427 ec223efb Iustin Pop
    node_to_primary = dict([(name, set()) for name in nodenames])
1428 ec223efb Iustin Pop
    node_to_secondary = dict([(name, set()) for name in nodenames])
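    # both maps have the shape {node name: set of instance names}, e.g.
    # {"node1.example.com": set(["inst1", "inst2"])} (illustrative names)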
1429 a8083063 Iustin Pop
1430 ec223efb Iustin Pop
    inst_fields = frozenset(("pinst_cnt", "pinst_list",
1431 ec223efb Iustin Pop
                             "sinst_cnt", "sinst_list"))
1432 ec223efb Iustin Pop
    if inst_fields & frozenset(self.op.output_fields):
1433 a8083063 Iustin Pop
      instancelist = self.cfg.GetInstanceList()
1434 a8083063 Iustin Pop
1435 ec223efb Iustin Pop
      for instance_name in instancelist:
1436 ec223efb Iustin Pop
        inst = self.cfg.GetInstanceInfo(instance_name)
1437 ec223efb Iustin Pop
        if inst.primary_node in node_to_primary:
1438 ec223efb Iustin Pop
          node_to_primary[inst.primary_node].add(inst.name)
1439 ec223efb Iustin Pop
        for secnode in inst.secondary_nodes:
1440 ec223efb Iustin Pop
          if secnode in node_to_secondary:
1441 ec223efb Iustin Pop
            node_to_secondary[secnode].add(inst.name)
1442 a8083063 Iustin Pop
1443 a8083063 Iustin Pop
    # end data gathering
1444 a8083063 Iustin Pop
1445 a8083063 Iustin Pop
    output = []
1446 a8083063 Iustin Pop
    for node in nodelist:
1447 a8083063 Iustin Pop
      node_output = []
1448 a8083063 Iustin Pop
      for field in self.op.output_fields:
1449 a8083063 Iustin Pop
        if field == "name":
1450 a8083063 Iustin Pop
          val = node.name
1451 ec223efb Iustin Pop
        elif field == "pinst_list":
1452 ec223efb Iustin Pop
          val = list(node_to_primary[node.name])
1453 ec223efb Iustin Pop
        elif field == "sinst_list":
1454 ec223efb Iustin Pop
          val = list(node_to_secondary[node.name])
1455 ec223efb Iustin Pop
        elif field == "pinst_cnt":
1456 ec223efb Iustin Pop
          val = len(node_to_primary[node.name])
1457 ec223efb Iustin Pop
        elif field == "sinst_cnt":
1458 ec223efb Iustin Pop
          val = len(node_to_secondary[node.name])
1459 a8083063 Iustin Pop
        elif field == "pip":
1460 a8083063 Iustin Pop
          val = node.primary_ip
1461 a8083063 Iustin Pop
        elif field == "sip":
1462 a8083063 Iustin Pop
          val = node.secondary_ip
1463 130a6a6f Iustin Pop
        elif field == "tags":
1464 130a6a6f Iustin Pop
          val = list(node.GetTags())
1465 a8083063 Iustin Pop
        elif field in self.dynamic_fields:
1466 ec223efb Iustin Pop
          val = live_data[node.name].get(field, None)
1467 a8083063 Iustin Pop
        else:
1468 3ecf6786 Iustin Pop
          raise errors.ParameterError(field)
1469 a8083063 Iustin Pop
        node_output.append(val)
1470 a8083063 Iustin Pop
      output.append(node_output)
1471 a8083063 Iustin Pop
1472 a8083063 Iustin Pop
    return output
1473 a8083063 Iustin Pop
1474 a8083063 Iustin Pop
1475 dcb93971 Michael Hanselmann
class LUQueryNodeVolumes(NoHooksLU):
1476 dcb93971 Michael Hanselmann
  """Logical unit for getting volumes on node(s).
1477 dcb93971 Michael Hanselmann

1478 dcb93971 Michael Hanselmann
  """
1479 dcb93971 Michael Hanselmann
  _OP_REQP = ["nodes", "output_fields"]
1480 dcb93971 Michael Hanselmann
1481 dcb93971 Michael Hanselmann
  def CheckPrereq(self):
1482 dcb93971 Michael Hanselmann
    """Check prerequisites.
1483 dcb93971 Michael Hanselmann

1484 dcb93971 Michael Hanselmann
    This checks that the fields required are valid output fields.
1485 dcb93971 Michael Hanselmann

1486 dcb93971 Michael Hanselmann
    """
1487 dcb93971 Michael Hanselmann
    self.nodes = _GetWantedNodes(self, self.op.nodes)
1488 dcb93971 Michael Hanselmann
1489 dcb93971 Michael Hanselmann
    _CheckOutputFields(static=["node"],
1490 dcb93971 Michael Hanselmann
                       dynamic=["phys", "vg", "name", "size", "instance"],
1491 dcb93971 Michael Hanselmann
                       selected=self.op.output_fields)
1492 dcb93971 Michael Hanselmann
1493 dcb93971 Michael Hanselmann
1494 dcb93971 Michael Hanselmann
  def Exec(self, feedback_fn):
1495 dcb93971 Michael Hanselmann
    """Computes the list of nodes and their attributes.
1496 dcb93971 Michael Hanselmann

1497 dcb93971 Michael Hanselmann
    """
1498 a7ba5e53 Iustin Pop
    nodenames = self.nodes
1499 dcb93971 Michael Hanselmann
    volumes = rpc.call_node_volumes(nodenames)
1500 dcb93971 Michael Hanselmann
1501 dcb93971 Michael Hanselmann
    ilist = [self.cfg.GetInstanceInfo(iname) for iname
1502 dcb93971 Michael Hanselmann
             in self.cfg.GetInstanceList()]
1503 dcb93971 Michael Hanselmann
1504 dcb93971 Michael Hanselmann
    lv_by_node = dict([(inst, inst.MapLVsByNode()) for inst in ilist])
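    # lv_by_node maps each instance object to its {node: [lv names]} layout,
    # so the "instance" output field below is a simple membership lookup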
1505 dcb93971 Michael Hanselmann
1506 dcb93971 Michael Hanselmann
    output = []
1507 dcb93971 Michael Hanselmann
    for node in nodenames:
1508 37d19eb2 Michael Hanselmann
      if node not in volumes or not volumes[node]:
1509 37d19eb2 Michael Hanselmann
        continue
1510 37d19eb2 Michael Hanselmann
1511 dcb93971 Michael Hanselmann
      node_vols = volumes[node][:]
1512 dcb93971 Michael Hanselmann
      node_vols.sort(key=lambda vol: vol['dev'])
1513 dcb93971 Michael Hanselmann
1514 dcb93971 Michael Hanselmann
      for vol in node_vols:
1515 dcb93971 Michael Hanselmann
        node_output = []
1516 dcb93971 Michael Hanselmann
        for field in self.op.output_fields:
1517 dcb93971 Michael Hanselmann
          if field == "node":
1518 dcb93971 Michael Hanselmann
            val = node
1519 dcb93971 Michael Hanselmann
          elif field == "phys":
1520 dcb93971 Michael Hanselmann
            val = vol['dev']
1521 dcb93971 Michael Hanselmann
          elif field == "vg":
1522 dcb93971 Michael Hanselmann
            val = vol['vg']
1523 dcb93971 Michael Hanselmann
          elif field == "name":
1524 dcb93971 Michael Hanselmann
            val = vol['name']
1525 dcb93971 Michael Hanselmann
          elif field == "size":
1526 dcb93971 Michael Hanselmann
            val = int(float(vol['size']))
1527 dcb93971 Michael Hanselmann
          elif field == "instance":
1528 dcb93971 Michael Hanselmann
            for inst in ilist:
1529 dcb93971 Michael Hanselmann
              if node not in lv_by_node[inst]:
1530 dcb93971 Michael Hanselmann
                continue
1531 dcb93971 Michael Hanselmann
              if vol['name'] in lv_by_node[inst][node]:
1532 dcb93971 Michael Hanselmann
                val = inst.name
1533 dcb93971 Michael Hanselmann
                break
1534 dcb93971 Michael Hanselmann
            else:
1535 dcb93971 Michael Hanselmann
              val = '-'
1536 dcb93971 Michael Hanselmann
          else:
1537 3ecf6786 Iustin Pop
            raise errors.ParameterError(field)
1538 dcb93971 Michael Hanselmann
          node_output.append(str(val))
1539 dcb93971 Michael Hanselmann
1540 dcb93971 Michael Hanselmann
        output.append(node_output)
1541 dcb93971 Michael Hanselmann
1542 dcb93971 Michael Hanselmann
    return output
1543 dcb93971 Michael Hanselmann
1544 dcb93971 Michael Hanselmann
1545 a8083063 Iustin Pop
class LUAddNode(LogicalUnit):
1546 a8083063 Iustin Pop
  """Logical unit for adding node to the cluster.
1547 a8083063 Iustin Pop

1548 a8083063 Iustin Pop
  """
1549 a8083063 Iustin Pop
  HPATH = "node-add"
1550 a8083063 Iustin Pop
  HTYPE = constants.HTYPE_NODE
1551 a8083063 Iustin Pop
  _OP_REQP = ["node_name"]
1552 a8083063 Iustin Pop
1553 a8083063 Iustin Pop
  def BuildHooksEnv(self):
1554 a8083063 Iustin Pop
    """Build hooks env.
1555 a8083063 Iustin Pop

1556 a8083063 Iustin Pop
    This will run on all nodes before, and on all nodes + the new node after.
1557 a8083063 Iustin Pop

1558 a8083063 Iustin Pop
    """
1559 a8083063 Iustin Pop
    env = {
1560 0e137c28 Iustin Pop
      "OP_TARGET": self.op.node_name,
1561 a8083063 Iustin Pop
      "NODE_NAME": self.op.node_name,
1562 a8083063 Iustin Pop
      "NODE_PIP": self.op.primary_ip,
1563 a8083063 Iustin Pop
      "NODE_SIP": self.op.secondary_ip,
1564 a8083063 Iustin Pop
      }
1565 a8083063 Iustin Pop
    nodes_0 = self.cfg.GetNodeList()
1566 a8083063 Iustin Pop
    nodes_1 = nodes_0 + [self.op.node_name, ]
1567 a8083063 Iustin Pop
    return env, nodes_0, nodes_1
1568 a8083063 Iustin Pop
1569 a8083063 Iustin Pop
  def CheckPrereq(self):
1570 a8083063 Iustin Pop
    """Check prerequisites.
1571 a8083063 Iustin Pop

1572 a8083063 Iustin Pop
    This checks:
1573 a8083063 Iustin Pop
     - the new node is not already in the config
1574 a8083063 Iustin Pop
     - it is resolvable
1575 a8083063 Iustin Pop
     - its parameters (single/dual homed) match the cluster
1576 a8083063 Iustin Pop

1577 a8083063 Iustin Pop
    Any errors are signalled by raising errors.OpPrereqError.
1578 a8083063 Iustin Pop

1579 a8083063 Iustin Pop
    """
1580 a8083063 Iustin Pop
    node_name = self.op.node_name
1581 a8083063 Iustin Pop
    cfg = self.cfg
1582 a8083063 Iustin Pop
1583 89e1fc26 Iustin Pop
    dns_data = utils.HostInfo(node_name)
1584 a8083063 Iustin Pop
1585 bcf043c9 Iustin Pop
    node = dns_data.name
1586 bcf043c9 Iustin Pop
    primary_ip = self.op.primary_ip = dns_data.ip
1587 a8083063 Iustin Pop
    secondary_ip = getattr(self.op, "secondary_ip", None)
1588 a8083063 Iustin Pop
    if secondary_ip is None:
1589 a8083063 Iustin Pop
      secondary_ip = primary_ip
1590 a8083063 Iustin Pop
    if not utils.IsValidIP(secondary_ip):
1591 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("Invalid secondary IP given")
1592 a8083063 Iustin Pop
    self.op.secondary_ip = secondary_ip
1593 e7c6e02b Michael Hanselmann
1594 a8083063 Iustin Pop
    node_list = cfg.GetNodeList()
1595 e7c6e02b Michael Hanselmann
    if not self.op.readd and node in node_list:
1596 e7c6e02b Michael Hanselmann
      raise errors.OpPrereqError("Node %s is already in the configuration" %
1597 e7c6e02b Michael Hanselmann
                                 node)
1598 e7c6e02b Michael Hanselmann
    elif self.op.readd and node not in node_list:
1599 e7c6e02b Michael Hanselmann
      raise errors.OpPrereqError("Node %s is not in the configuration" % node)
1600 a8083063 Iustin Pop
1601 a8083063 Iustin Pop
    for existing_node_name in node_list:
1602 a8083063 Iustin Pop
      existing_node = cfg.GetNodeInfo(existing_node_name)
1603 e7c6e02b Michael Hanselmann
1604 e7c6e02b Michael Hanselmann
      if self.op.readd and node == existing_node_name:
1605 e7c6e02b Michael Hanselmann
        if (existing_node.primary_ip != primary_ip or
1606 e7c6e02b Michael Hanselmann
            existing_node.secondary_ip != secondary_ip):
1607 e7c6e02b Michael Hanselmann
          raise errors.OpPrereqError("Readded node doesn't have the same IP"
1608 e7c6e02b Michael Hanselmann
                                     " address configuration as before")
1609 e7c6e02b Michael Hanselmann
        continue
1610 e7c6e02b Michael Hanselmann
1611 a8083063 Iustin Pop
      if (existing_node.primary_ip == primary_ip or
1612 a8083063 Iustin Pop
          existing_node.secondary_ip == primary_ip or
1613 a8083063 Iustin Pop
          existing_node.primary_ip == secondary_ip or
1614 a8083063 Iustin Pop
          existing_node.secondary_ip == secondary_ip):
1615 3ecf6786 Iustin Pop
        raise errors.OpPrereqError("New node ip address(es) conflict with"
1616 3ecf6786 Iustin Pop
                                   " existing node %s" % existing_node.name)
1617 a8083063 Iustin Pop
1618 a8083063 Iustin Pop
    # check that the type of the node (single versus dual homed) is the
1619 a8083063 Iustin Pop
    # same as for the master
1620 880478f8 Iustin Pop
    myself = cfg.GetNodeInfo(self.sstore.GetMasterNode())
1621 a8083063 Iustin Pop
    master_singlehomed = myself.secondary_ip == myself.primary_ip
1622 a8083063 Iustin Pop
    newbie_singlehomed = secondary_ip == primary_ip
1623 a8083063 Iustin Pop
    if master_singlehomed != newbie_singlehomed:
1624 a8083063 Iustin Pop
      if master_singlehomed:
1625 3ecf6786 Iustin Pop
        raise errors.OpPrereqError("The master has no private ip but the"
1626 3ecf6786 Iustin Pop
                                   " new node has one")
1627 a8083063 Iustin Pop
      else:
1628 3ecf6786 Iustin Pop
        raise errors.OpPrereqError("The master has a private ip but the"
1629 3ecf6786 Iustin Pop
                                   " new node doesn't have one")
1630 a8083063 Iustin Pop
1631 a8083063 Iustin Pop
    # checks reachability
1632 b15d625f Iustin Pop
    if not utils.TcpPing(primary_ip, constants.DEFAULT_NODED_PORT):
1633 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("Node not reachable by ping")
1634 a8083063 Iustin Pop
1635 a8083063 Iustin Pop
    if not newbie_singlehomed:
1636 a8083063 Iustin Pop
      # check reachability from my secondary ip to newbie's secondary ip
1637 b15d625f Iustin Pop
      if not utils.TcpPing(secondary_ip, constants.DEFAULT_NODED_PORT,
1638 b15d625f Iustin Pop
                           source=myself.secondary_ip):
1639 f4bc1f2c Michael Hanselmann
        raise errors.OpPrereqError("Node secondary ip not reachable by TCP"
1640 f4bc1f2c Michael Hanselmann
                                   " based ping to noded port")
1641 a8083063 Iustin Pop
1642 a8083063 Iustin Pop
    self.new_node = objects.Node(name=node,
1643 a8083063 Iustin Pop
                                 primary_ip=primary_ip,
1644 a8083063 Iustin Pop
                                 secondary_ip=secondary_ip)
1645 a8083063 Iustin Pop
1646 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
1647 a8083063 Iustin Pop
    """Adds the new node to the cluster.
1648 a8083063 Iustin Pop

1649 a8083063 Iustin Pop
    """
1650 a8083063 Iustin Pop
    new_node = self.new_node
1651 a8083063 Iustin Pop
    node = new_node.name
1652 a8083063 Iustin Pop
1653 a8083063 Iustin Pop
    # check connectivity
1654 a8083063 Iustin Pop
    result = rpc.call_version([node])[node]
1655 a8083063 Iustin Pop
    if result:
1656 a8083063 Iustin Pop
      if constants.PROTOCOL_VERSION == result:
1657 a8083063 Iustin Pop
        logger.Info("communication to node %s fine, sw version %s match" %
1658 a8083063 Iustin Pop
                    (node, result))
1659 a8083063 Iustin Pop
      else:
1660 3ecf6786 Iustin Pop
        raise errors.OpExecError("Version mismatch master version %s,"
1661 3ecf6786 Iustin Pop
                                 " node version %s" %
1662 3ecf6786 Iustin Pop
                                 (constants.PROTOCOL_VERSION, result))
1663 a8083063 Iustin Pop
    else:
1664 3ecf6786 Iustin Pop
      raise errors.OpExecError("Cannot get version from the new node")
1665 a8083063 Iustin Pop
1666 a8083063 Iustin Pop
    # setup ssh on node
1667 a8083063 Iustin Pop
    logger.Info("copy ssh key to node %s" % node)
1668 70d9e3d8 Iustin Pop
    priv_key, pub_key, _ = ssh.GetUserFiles(constants.GANETI_RUNAS)
1669 a8083063 Iustin Pop
    keyarray = []
1670 70d9e3d8 Iustin Pop
    keyfiles = [constants.SSH_HOST_DSA_PRIV, constants.SSH_HOST_DSA_PUB,
1671 70d9e3d8 Iustin Pop
                constants.SSH_HOST_RSA_PRIV, constants.SSH_HOST_RSA_PUB,
1672 70d9e3d8 Iustin Pop
                priv_key, pub_key]
1673 a8083063 Iustin Pop
1674 a8083063 Iustin Pop
    for i in keyfiles:
1675 a8083063 Iustin Pop
      f = open(i, 'r')
1676 a8083063 Iustin Pop
      try:
1677 a8083063 Iustin Pop
        keyarray.append(f.read())
1678 a8083063 Iustin Pop
      finally:
1679 a8083063 Iustin Pop
        f.close()
1680 a8083063 Iustin Pop
1681 a8083063 Iustin Pop
    result = rpc.call_node_add(node, keyarray[0], keyarray[1], keyarray[2],
1682 a8083063 Iustin Pop
                               keyarray[3], keyarray[4], keyarray[5])
1683 a8083063 Iustin Pop
1684 a8083063 Iustin Pop
    if not result:
1685 3ecf6786 Iustin Pop
      raise errors.OpExecError("Cannot transfer ssh keys to the new node")
1686 a8083063 Iustin Pop
1687 a8083063 Iustin Pop
    # Add node to our /etc/hosts, and add key to known_hosts
1688 d9c02ca6 Michael Hanselmann
    utils.AddHostToEtcHosts(new_node.name)
1689 c8a0948f Michael Hanselmann
1690 a8083063 Iustin Pop
    if new_node.secondary_ip != new_node.primary_ip:
1691 16abfbc2 Alexander Schreiber
      if not rpc.call_node_tcp_ping(new_node.name,
1692 16abfbc2 Alexander Schreiber
                                    constants.LOCALHOST_IP_ADDRESS,
1693 16abfbc2 Alexander Schreiber
                                    new_node.secondary_ip,
1694 16abfbc2 Alexander Schreiber
                                    constants.DEFAULT_NODED_PORT,
1695 16abfbc2 Alexander Schreiber
                                    10, False):
1696 f4bc1f2c Michael Hanselmann
        raise errors.OpExecError("Node claims it doesn't have the secondary ip"
1697 f4bc1f2c Michael Hanselmann
                                 " you gave (%s). Please fix and re-run this"
1698 f4bc1f2c Michael Hanselmann
                                 " command." % new_node.secondary_ip)
1699 a8083063 Iustin Pop
1700 5c0527ed Guido Trotter
    node_verify_list = [self.sstore.GetMasterNode()]
1701 5c0527ed Guido Trotter
    node_verify_param = {
1702 5c0527ed Guido Trotter
      'nodelist': [node],
1703 5c0527ed Guido Trotter
      # TODO: do a node-net-test as well?
1704 5c0527ed Guido Trotter
    }
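    # each verifier's reply is expected to carry a 'nodelist' dict mapping
    # failed peers to error messages; an empty dict means the ssh/hostname
    # checks passed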
1705 5c0527ed Guido Trotter
1706 5c0527ed Guido Trotter
    result = rpc.call_node_verify(node_verify_list, node_verify_param)
1707 5c0527ed Guido Trotter
    for verifier in node_verify_list:
1708 5c0527ed Guido Trotter
      if not result[verifier]:
1709 5c0527ed Guido Trotter
        raise errors.OpExecError("Cannot communicate with %s's node daemon"
1710 5c0527ed Guido Trotter
                                 " for remote verification" % verifier)
1711 5c0527ed Guido Trotter
      if result[verifier]['nodelist']:
1712 5c0527ed Guido Trotter
        for failed in result[verifier]['nodelist']:
1713 5c0527ed Guido Trotter
          feedback_fn("ssh/hostname verification failed %s -> %s" %
1714 5c0527ed Guido Trotter
                      (verifier, result[verifier]['nodelist'][failed]))
1715 5c0527ed Guido Trotter
        raise errors.OpExecError("ssh/hostname verification failed.")
1716 ff98055b Iustin Pop
1717 a8083063 Iustin Pop
    # Distribute updated /etc/hosts and known_hosts to all nodes,
1718 a8083063 Iustin Pop
    # including the node just added
1719 880478f8 Iustin Pop
    myself = self.cfg.GetNodeInfo(self.sstore.GetMasterNode())
1720 102b115b Michael Hanselmann
    dist_nodes = self.cfg.GetNodeList()
1721 102b115b Michael Hanselmann
    if not self.op.readd:
1722 102b115b Michael Hanselmann
      dist_nodes.append(node)
1723 a8083063 Iustin Pop
    if myself.name in dist_nodes:
1724 a8083063 Iustin Pop
      dist_nodes.remove(myself.name)
1725 a8083063 Iustin Pop
1726 a8083063 Iustin Pop
    logger.Debug("Copying hosts and known_hosts to all nodes")
1727 107711b0 Michael Hanselmann
    for fname in (constants.ETC_HOSTS, constants.SSH_KNOWN_HOSTS_FILE):
1728 a8083063 Iustin Pop
      result = rpc.call_upload_file(dist_nodes, fname)
1729 a8083063 Iustin Pop
      for to_node in dist_nodes:
1730 a8083063 Iustin Pop
        if not result[to_node]:
1731 a8083063 Iustin Pop
          logger.Error("copy of file %s to node %s failed" %
1732 a8083063 Iustin Pop
                       (fname, to_node))
1733 a8083063 Iustin Pop
1734 3d1e7706 Guido Trotter
    to_copy = self.sstore.GetFileList()
1735 2a6469d5 Alexander Schreiber
    if self.sstore.GetHypervisorType() == constants.HT_XEN_HVM31:
1736 2a6469d5 Alexander Schreiber
      to_copy.append(constants.VNC_PASSWORD_FILE)
1737 a8083063 Iustin Pop
    for fname in to_copy:
1738 b5602d15 Guido Trotter
      result = rpc.call_upload_file([node], fname)
1739 b5602d15 Guido Trotter
      if not result[node]:
1740 a8083063 Iustin Pop
        logger.Error("could not copy file %s to node %s" % (fname, node))
1741 a8083063 Iustin Pop
1742 e7c6e02b Michael Hanselmann
    if not self.op.readd:
1743 e7c6e02b Michael Hanselmann
      logger.Info("adding node %s to cluster.conf" % node)
1744 e7c6e02b Michael Hanselmann
      self.cfg.AddNode(new_node)
1745 a2fd9afc Guido Trotter
      # Add the new node to the Ganeti Lock Manager
1746 a2fd9afc Guido Trotter
      self.context.glm.add(locking.LEVEL_NODE, node)
1747 a8083063 Iustin Pop
1748 a8083063 Iustin Pop
1749 a8083063 Iustin Pop
class LUQueryClusterInfo(NoHooksLU):
1750 a8083063 Iustin Pop
  """Query cluster configuration.
1751 a8083063 Iustin Pop

1752 a8083063 Iustin Pop
  """
1753 a8083063 Iustin Pop
  _OP_REQP = []
1754 59322403 Iustin Pop
  REQ_MASTER = False
1755 642339cf Guido Trotter
  REQ_BGL = False
1756 642339cf Guido Trotter
1757 642339cf Guido Trotter
  def ExpandNames(self):
1758 642339cf Guido Trotter
    self.needed_locks = {}
1759 a8083063 Iustin Pop
1760 a8083063 Iustin Pop
  def CheckPrereq(self):
1761 a8083063 Iustin Pop
    """No prerequsites needed for this LU.
1762 a8083063 Iustin Pop

1763 a8083063 Iustin Pop
    """
1764 a8083063 Iustin Pop
    pass
1765 a8083063 Iustin Pop
1766 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
1767 a8083063 Iustin Pop
    """Return cluster config.
1768 a8083063 Iustin Pop

1769 a8083063 Iustin Pop
    """
1770 a8083063 Iustin Pop
    result = {
1771 5fcdc80d Iustin Pop
      "name": self.sstore.GetClusterName(),
1772 a8083063 Iustin Pop
      "software_version": constants.RELEASE_VERSION,
1773 a8083063 Iustin Pop
      "protocol_version": constants.PROTOCOL_VERSION,
1774 a8083063 Iustin Pop
      "config_version": constants.CONFIG_VERSION,
1775 a8083063 Iustin Pop
      "os_api_version": constants.OS_API_VERSION,
1776 a8083063 Iustin Pop
      "export_version": constants.EXPORT_VERSION,
1777 880478f8 Iustin Pop
      "master": self.sstore.GetMasterNode(),
1778 a8083063 Iustin Pop
      "architecture": (platform.architecture()[0], platform.machine()),
1779 8a12ce45 Iustin Pop
      "hypervisor_type": self.sstore.GetHypervisorType(),
1780 a8083063 Iustin Pop
      }
1781 a8083063 Iustin Pop
1782 a8083063 Iustin Pop
    return result
1783 a8083063 Iustin Pop
1784 a8083063 Iustin Pop
1785 a8083063 Iustin Pop
class LUDumpClusterConfig(NoHooksLU):
1786 a8083063 Iustin Pop
  """Return a text-representation of the cluster-config.
1787 a8083063 Iustin Pop

1788 a8083063 Iustin Pop
  """
1789 a8083063 Iustin Pop
  _OP_REQP = []
1790 642339cf Guido Trotter
  REQ_BGL = False
1791 642339cf Guido Trotter
1792 642339cf Guido Trotter
  def ExpandNames(self):
1793 642339cf Guido Trotter
    self.needed_locks = {}
1794 a8083063 Iustin Pop
1795 a8083063 Iustin Pop
  def CheckPrereq(self):
1796 a8083063 Iustin Pop
    """No prerequisites.
1797 a8083063 Iustin Pop

1798 a8083063 Iustin Pop
    """
1799 a8083063 Iustin Pop
    pass
1800 a8083063 Iustin Pop
1801 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
1802 a8083063 Iustin Pop
    """Dump a representation of the cluster config to the standard output.
1803 a8083063 Iustin Pop

1804 a8083063 Iustin Pop
    """
1805 a8083063 Iustin Pop
    return self.cfg.DumpConfig()
1806 a8083063 Iustin Pop
1807 a8083063 Iustin Pop
1808 a8083063 Iustin Pop
class LUActivateInstanceDisks(NoHooksLU):
1809 a8083063 Iustin Pop
  """Bring up an instance's disks.
1810 a8083063 Iustin Pop

1811 a8083063 Iustin Pop
  """
1812 a8083063 Iustin Pop
  _OP_REQP = ["instance_name"]
1813 a8083063 Iustin Pop
1814 a8083063 Iustin Pop
  def CheckPrereq(self):
1815 a8083063 Iustin Pop
    """Check prerequisites.
1816 a8083063 Iustin Pop

1817 a8083063 Iustin Pop
    This checks that the instance is in the cluster.
1818 a8083063 Iustin Pop

1819 a8083063 Iustin Pop
    """
1820 a8083063 Iustin Pop
    instance = self.cfg.GetInstanceInfo(
1821 a8083063 Iustin Pop
      self.cfg.ExpandInstanceName(self.op.instance_name))
1822 a8083063 Iustin Pop
    if instance is None:
1823 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("Instance '%s' not known" %
1824 3ecf6786 Iustin Pop
                                 self.op.instance_name)
1825 a8083063 Iustin Pop
    self.instance = instance
1826 a8083063 Iustin Pop
1827 a8083063 Iustin Pop
1828 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
1829 a8083063 Iustin Pop
    """Activate the disks.
1830 a8083063 Iustin Pop

1831 a8083063 Iustin Pop
    """
1832 a8083063 Iustin Pop
    disks_ok, disks_info = _AssembleInstanceDisks(self.instance, self.cfg)
1833 a8083063 Iustin Pop
    if not disks_ok:
1834 3ecf6786 Iustin Pop
      raise errors.OpExecError("Cannot activate block devices")
1835 a8083063 Iustin Pop
1836 a8083063 Iustin Pop
    return disks_info
1837 a8083063 Iustin Pop
1838 a8083063 Iustin Pop
1839 a8083063 Iustin Pop
def _AssembleInstanceDisks(instance, cfg, ignore_secondaries=False):
1840 a8083063 Iustin Pop
  """Prepare the block devices for an instance.
1841 a8083063 Iustin Pop

1842 a8083063 Iustin Pop
  This sets up the block devices on all nodes.
1843 a8083063 Iustin Pop

1844 a8083063 Iustin Pop
  Args:
1845 a8083063 Iustin Pop
    instance: a ganeti.objects.Instance object
1846 a8083063 Iustin Pop
    ignore_secondaries: if true, errors on secondary nodes won't result
1847 a8083063 Iustin Pop
                        in an error return from the function
1848 a8083063 Iustin Pop

1849 a8083063 Iustin Pop
  Returns:
1850 a8083063 Iustin Pop
    false if the operation failed
1851 a8083063 Iustin Pop
    list of (host, instance_visible_name, node_visible_name) if the operation
1852 a8083063 Iustin Pop
         succeeded with the mapping from node devices to instance devices
1853 a8083063 Iustin Pop
  """
1854 a8083063 Iustin Pop
  device_info = []
1855 a8083063 Iustin Pop
  disks_ok = True
1856 fdbd668d Iustin Pop
  iname = instance.name
1857 fdbd668d Iustin Pop
  # With the two passes mechanism we try to reduce the window of
1858 fdbd668d Iustin Pop
  # opportunity for the race condition of switching DRBD to primary
1859 fdbd668d Iustin Pop
  # before handshaking occurred, but we do not eliminate it
1860 fdbd668d Iustin Pop
1861 fdbd668d Iustin Pop
  # The proper fix would be to wait (with some limits) until the
1862 fdbd668d Iustin Pop
  # connection has been made and drbd transitions from WFConnection
1863 fdbd668d Iustin Pop
  # into any other network-connected state (Connected, SyncTarget,
1864 fdbd668d Iustin Pop
  # SyncSource, etc.)
1865 fdbd668d Iustin Pop
1866 fdbd668d Iustin Pop
  # 1st pass, assemble on all nodes in secondary mode
1867 a8083063 Iustin Pop
  for inst_disk in instance.disks:
1868 a8083063 Iustin Pop
    for node, node_disk in inst_disk.ComputeNodeTree(instance.primary_node):
1869 a8083063 Iustin Pop
      cfg.SetDiskID(node_disk, node)
1870 fdbd668d Iustin Pop
      result = rpc.call_blockdev_assemble(node, node_disk, iname, False)
1871 a8083063 Iustin Pop
      if not result:
1872 f4bc1f2c Michael Hanselmann
        logger.Error("could not prepare block device %s on node %s"
1873 fdbd668d Iustin Pop
                     " (is_primary=False, pass=1)" % (inst_disk.iv_name, node))
1874 fdbd668d Iustin Pop
        if not ignore_secondaries:
1875 a8083063 Iustin Pop
          disks_ok = False
1876 fdbd668d Iustin Pop
1877 fdbd668d Iustin Pop
  # FIXME: race condition on drbd migration to primary
1878 fdbd668d Iustin Pop
1879 fdbd668d Iustin Pop
  # 2nd pass, do only the primary node
1880 fdbd668d Iustin Pop
  for inst_disk in instance.disks:
1881 fdbd668d Iustin Pop
    for node, node_disk in inst_disk.ComputeNodeTree(instance.primary_node):
1882 fdbd668d Iustin Pop
      if node != instance.primary_node:
1883 fdbd668d Iustin Pop
        continue
1884 fdbd668d Iustin Pop
      cfg.SetDiskID(node_disk, node)
1885 fdbd668d Iustin Pop
      result = rpc.call_blockdev_assemble(node, node_disk, iname, True)
1886 fdbd668d Iustin Pop
      if not result:
1887 fdbd668d Iustin Pop
        logger.Error("could not prepare block device %s on node %s"
1888 fdbd668d Iustin Pop
                     " (is_primary=True, pass=2)" % (inst_disk.iv_name, node))
1889 fdbd668d Iustin Pop
        disks_ok = False
1890 fdbd668d Iustin Pop
    device_info.append((instance.primary_node, inst_disk.iv_name, result))
1891 a8083063 Iustin Pop
1892 b352ab5b Iustin Pop
  # leave the disks configured for the primary node
1893 b352ab5b Iustin Pop
  # this is a workaround that would be fixed better by
1894 b352ab5b Iustin Pop
  # improving the logical/physical id handling
1895 b352ab5b Iustin Pop
  for disk in instance.disks:
1896 b352ab5b Iustin Pop
    cfg.SetDiskID(disk, instance.primary_node)
1897 b352ab5b Iustin Pop
1898 a8083063 Iustin Pop
  return disks_ok, device_info
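# On success each device_info entry is a (primary node, instance-visible
# device name, assemble result) triple, for example roughly
# ("node1.example.com", "sda", "/dev/drbd0"); the concrete values are
# only illustrative.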
1899 a8083063 Iustin Pop
1900 a8083063 Iustin Pop
1901 fe7b0351 Michael Hanselmann
def _StartInstanceDisks(cfg, instance, force):
1902 3ecf6786 Iustin Pop
  """Start the disks of an instance.
1903 3ecf6786 Iustin Pop

1904 3ecf6786 Iustin Pop
  """
1905 fe7b0351 Michael Hanselmann
  disks_ok, dummy = _AssembleInstanceDisks(instance, cfg,
1906 fe7b0351 Michael Hanselmann
                                           ignore_secondaries=force)
1907 fe7b0351 Michael Hanselmann
  if not disks_ok:
1908 fe7b0351 Michael Hanselmann
    _ShutdownInstanceDisks(instance, cfg)
1909 fe7b0351 Michael Hanselmann
    if force is not None and not force:
1910 fe7b0351 Michael Hanselmann
      logger.Error("If the message above refers to a secondary node,"
1911 fe7b0351 Michael Hanselmann
                   " you can retry the operation using '--force'.")
1912 3ecf6786 Iustin Pop
    raise errors.OpExecError("Disk consistency error")
1913 fe7b0351 Michael Hanselmann
1914 fe7b0351 Michael Hanselmann
1915 a8083063 Iustin Pop
class LUDeactivateInstanceDisks(NoHooksLU):
1916 a8083063 Iustin Pop
  """Shutdown an instance's disks.
1917 a8083063 Iustin Pop

1918 a8083063 Iustin Pop
  """
1919 a8083063 Iustin Pop
  _OP_REQP = ["instance_name"]
1920 a8083063 Iustin Pop
1921 a8083063 Iustin Pop
  def CheckPrereq(self):
1922 a8083063 Iustin Pop
    """Check prerequisites.
1923 a8083063 Iustin Pop

1924 a8083063 Iustin Pop
    This checks that the instance is in the cluster.
1925 a8083063 Iustin Pop

1926 a8083063 Iustin Pop
    """
1927 a8083063 Iustin Pop
    instance = self.cfg.GetInstanceInfo(
1928 a8083063 Iustin Pop
      self.cfg.ExpandInstanceName(self.op.instance_name))
1929 a8083063 Iustin Pop
    if instance is None:
1930 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("Instance '%s' not known" %
1931 3ecf6786 Iustin Pop
                                 self.op.instance_name)
1932 a8083063 Iustin Pop
    self.instance = instance
1933 a8083063 Iustin Pop
1934 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
1935 a8083063 Iustin Pop
    """Deactivate the disks
1936 a8083063 Iustin Pop

1937 a8083063 Iustin Pop
    """
1938 a8083063 Iustin Pop
    instance = self.instance
1939 a8083063 Iustin Pop
    ins_l = rpc.call_instance_list([instance.primary_node])
1940 a8083063 Iustin Pop
    ins_l = ins_l[instance.primary_node]
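    # the call returns a list of instance names running on the node; any
    # other result means the node could not be contacted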
1941 a8083063 Iustin Pop
    if not isinstance(ins_l, list):
1942 3ecf6786 Iustin Pop
      raise errors.OpExecError("Can't contact node '%s'" %
1943 3ecf6786 Iustin Pop
                               instance.primary_node)
1944 a8083063 Iustin Pop
1945 a8083063 Iustin Pop
    if self.instance.name in ins_l:
1946 3ecf6786 Iustin Pop
      raise errors.OpExecError("Instance is running, can't shutdown"
1947 3ecf6786 Iustin Pop
                               " block devices.")
1948 a8083063 Iustin Pop
1949 a8083063 Iustin Pop
    _ShutdownInstanceDisks(instance, self.cfg)
1950 a8083063 Iustin Pop
1951 a8083063 Iustin Pop
1952 a8083063 Iustin Pop
def _ShutdownInstanceDisks(instance, cfg, ignore_primary=False):
1953 a8083063 Iustin Pop
  """Shutdown block devices of an instance.
1954 a8083063 Iustin Pop

1955 a8083063 Iustin Pop
  This does the shutdown on all nodes of the instance.
1956 a8083063 Iustin Pop

1957 a8083063 Iustin Pop
  If ignore_primary is true, errors on the primary node are
1958 a8083063 Iustin Pop
  ignored.
1959 a8083063 Iustin Pop

1960 a8083063 Iustin Pop
  """
1961 a8083063 Iustin Pop
  result = True
1962 a8083063 Iustin Pop
  for disk in instance.disks:
1963 a8083063 Iustin Pop
    for node, top_disk in disk.ComputeNodeTree(instance.primary_node):
1964 a8083063 Iustin Pop
      cfg.SetDiskID(top_disk, node)
1965 a8083063 Iustin Pop
      if not rpc.call_blockdev_shutdown(node, top_disk):
1966 a8083063 Iustin Pop
        logger.Error("could not shutdown block device %s on node %s" %
1967 a8083063 Iustin Pop
                     (disk.iv_name, node))
1968 a8083063 Iustin Pop
        if not ignore_primary or node != instance.primary_node:
1969 a8083063 Iustin Pop
          result = False
1970 a8083063 Iustin Pop
  return result
1971 a8083063 Iustin Pop
1972 a8083063 Iustin Pop
1973 d4f16fd9 Iustin Pop
def _CheckNodeFreeMemory(cfg, node, reason, requested):
1974 d4f16fd9 Iustin Pop
  """Checks if a node has enough free memory.
1975 d4f16fd9 Iustin Pop

1976 d4f16fd9 Iustin Pop
  This function checks if a given node has the needed amount of free
1977 d4f16fd9 Iustin Pop
  memory. In case the node has less memory or we cannot get the
1978 d4f16fd9 Iustin Pop
  information from the node, this function raises an OpPrereqError
1979 d4f16fd9 Iustin Pop
  exception.
1980 d4f16fd9 Iustin Pop

1981 d4f16fd9 Iustin Pop
  Args:
1982 d4f16fd9 Iustin Pop
    - cfg: a ConfigWriter instance
1983 d4f16fd9 Iustin Pop
    - node: the node name
1984 d4f16fd9 Iustin Pop
    - reason: string to use in the error message
1985 d4f16fd9 Iustin Pop
    - requested: the amount of memory in MiB
1986 d4f16fd9 Iustin Pop

1987 d4f16fd9 Iustin Pop
  """
1988 d4f16fd9 Iustin Pop
  nodeinfo = rpc.call_node_info([node], cfg.GetVGName())
1989 d4f16fd9 Iustin Pop
  if not nodeinfo or not isinstance(nodeinfo, dict):
1990 d4f16fd9 Iustin Pop
    raise errors.OpPrereqError("Could not contact node %s for resource"
1991 d4f16fd9 Iustin Pop
                             " information" % (node,))
1992 d4f16fd9 Iustin Pop
1993 d4f16fd9 Iustin Pop
  free_mem = nodeinfo[node].get('memory_free')
1994 d4f16fd9 Iustin Pop
  if not isinstance(free_mem, int):
1995 d4f16fd9 Iustin Pop
    raise errors.OpPrereqError("Can't compute free memory on node %s, result"
1996 d4f16fd9 Iustin Pop
                             " was '%s'" % (node, free_mem))
1997 d4f16fd9 Iustin Pop
  if requested > free_mem:
1998 d4f16fd9 Iustin Pop
    raise errors.OpPrereqError("Not enough memory on node %s for %s:"
1999 d4f16fd9 Iustin Pop
                             " needed %s MiB, available %s MiB" %
2000 d4f16fd9 Iustin Pop
                               (node, reason, requested, free_mem))
2001 d4f16fd9 Iustin Pop
2002 d4f16fd9 Iustin Pop
2003 a8083063 Iustin Pop
class LUStartupInstance(LogicalUnit):
2004 a8083063 Iustin Pop
  """Starts an instance.
2005 a8083063 Iustin Pop

2006 a8083063 Iustin Pop
  """
2007 a8083063 Iustin Pop
  HPATH = "instance-start"
2008 a8083063 Iustin Pop
  HTYPE = constants.HTYPE_INSTANCE
2009 a8083063 Iustin Pop
  _OP_REQP = ["instance_name", "force"]
2010 a8083063 Iustin Pop
2011 a8083063 Iustin Pop
  def BuildHooksEnv(self):
2012 a8083063 Iustin Pop
    """Build hooks env.
2013 a8083063 Iustin Pop

2014 a8083063 Iustin Pop
    This runs on master, primary and secondary nodes of the instance.
2015 a8083063 Iustin Pop

2016 a8083063 Iustin Pop
    """
2017 a8083063 Iustin Pop
    env = {
2018 a8083063 Iustin Pop
      "FORCE": self.op.force,
2019 a8083063 Iustin Pop
      }
2020 396e1b78 Michael Hanselmann
    env.update(_BuildInstanceHookEnvByObject(self.instance))
2021 880478f8 Iustin Pop
    nl = ([self.sstore.GetMasterNode(), self.instance.primary_node] +
2022 a8083063 Iustin Pop
          list(self.instance.secondary_nodes))
2023 a8083063 Iustin Pop
    return env, nl, nl
2024 a8083063 Iustin Pop
2025 a8083063 Iustin Pop
  def CheckPrereq(self):
2026 a8083063 Iustin Pop
    """Check prerequisites.
2027 a8083063 Iustin Pop

2028 a8083063 Iustin Pop
    This checks that the instance is in the cluster.
2029 a8083063 Iustin Pop

2030 a8083063 Iustin Pop
    """
2031 a8083063 Iustin Pop
    instance = self.cfg.GetInstanceInfo(
2032 a8083063 Iustin Pop
      self.cfg.ExpandInstanceName(self.op.instance_name))
2033 a8083063 Iustin Pop
    if instance is None:
2034 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("Instance '%s' not known" %
2035 3ecf6786 Iustin Pop
                                 self.op.instance_name)
2036 a8083063 Iustin Pop
2037 a8083063 Iustin Pop
    # check bridge existence
2038 bf6929a2 Alexander Schreiber
    _CheckInstanceBridgesExist(instance)
2039 a8083063 Iustin Pop
2040 d4f16fd9 Iustin Pop
    _CheckNodeFreeMemory(self.cfg, instance.primary_node,
2041 d4f16fd9 Iustin Pop
                         "starting instance %s" % instance.name,
2042 d4f16fd9 Iustin Pop
                         instance.memory)
2043 d4f16fd9 Iustin Pop
2044 a8083063 Iustin Pop
    self.instance = instance
2045 a8083063 Iustin Pop
    self.op.instance_name = instance.name
2046 a8083063 Iustin Pop
2047 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
2048 a8083063 Iustin Pop
    """Start the instance.
2049 a8083063 Iustin Pop

2050 a8083063 Iustin Pop
    """
2051 a8083063 Iustin Pop
    instance = self.instance
2052 a8083063 Iustin Pop
    force = self.op.force
2053 a8083063 Iustin Pop
    extra_args = getattr(self.op, "extra_args", "")
2054 a8083063 Iustin Pop
2055 fe482621 Iustin Pop
    self.cfg.MarkInstanceUp(instance.name)
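    # the desired state is recorded before the actual start, so a failed
    # start below still leaves the instance marked as up in the configuration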
2056 fe482621 Iustin Pop
2057 a8083063 Iustin Pop
    node_current = instance.primary_node
2058 a8083063 Iustin Pop
2059 fe7b0351 Michael Hanselmann
    _StartInstanceDisks(self.cfg, instance, force)
2060 a8083063 Iustin Pop
2061 a8083063 Iustin Pop
    if not rpc.call_instance_start(node_current, instance, extra_args):
2062 a8083063 Iustin Pop
      _ShutdownInstanceDisks(instance, self.cfg)
2063 3ecf6786 Iustin Pop
      raise errors.OpExecError("Could not start instance")
2064 a8083063 Iustin Pop
2065 a8083063 Iustin Pop
2066 bf6929a2 Alexander Schreiber
class LURebootInstance(LogicalUnit):
2067 bf6929a2 Alexander Schreiber
  """Reboot an instance.
2068 bf6929a2 Alexander Schreiber

2069 bf6929a2 Alexander Schreiber
  """
2070 bf6929a2 Alexander Schreiber
  HPATH = "instance-reboot"
2071 bf6929a2 Alexander Schreiber
  HTYPE = constants.HTYPE_INSTANCE
2072 bf6929a2 Alexander Schreiber
  _OP_REQP = ["instance_name", "ignore_secondaries", "reboot_type"]
2073 bf6929a2 Alexander Schreiber
2074 bf6929a2 Alexander Schreiber
  def BuildHooksEnv(self):
2075 bf6929a2 Alexander Schreiber
    """Build hooks env.
2076 bf6929a2 Alexander Schreiber

2077 bf6929a2 Alexander Schreiber
    This runs on master, primary and secondary nodes of the instance.
2078 bf6929a2 Alexander Schreiber

2079 bf6929a2 Alexander Schreiber
    """
2080 bf6929a2 Alexander Schreiber
    env = {
2081 bf6929a2 Alexander Schreiber
      "IGNORE_SECONDARIES": self.op.ignore_secondaries,
2082 bf6929a2 Alexander Schreiber
      }
2083 bf6929a2 Alexander Schreiber
    env.update(_BuildInstanceHookEnvByObject(self.instance))
2084 bf6929a2 Alexander Schreiber
    nl = ([self.sstore.GetMasterNode(), self.instance.primary_node] +
2085 bf6929a2 Alexander Schreiber
          list(self.instance.secondary_nodes))
2086 bf6929a2 Alexander Schreiber
    return env, nl, nl
2087 bf6929a2 Alexander Schreiber
2088 bf6929a2 Alexander Schreiber
  def CheckPrereq(self):
2089 bf6929a2 Alexander Schreiber
    """Check prerequisites.
2090 bf6929a2 Alexander Schreiber

2091 bf6929a2 Alexander Schreiber
    This checks that the instance is in the cluster.
2092 bf6929a2 Alexander Schreiber

2093 bf6929a2 Alexander Schreiber
    """
2094 bf6929a2 Alexander Schreiber
    instance = self.cfg.GetInstanceInfo(
2095 bf6929a2 Alexander Schreiber
      self.cfg.ExpandInstanceName(self.op.instance_name))
2096 bf6929a2 Alexander Schreiber
    if instance is None:
2097 bf6929a2 Alexander Schreiber
      raise errors.OpPrereqError("Instance '%s' not known" %
2098 bf6929a2 Alexander Schreiber
                                 self.op.instance_name)
2099 bf6929a2 Alexander Schreiber
2100 bf6929a2 Alexander Schreiber
    # check bridge existence
2101 bf6929a2 Alexander Schreiber
    _CheckInstanceBridgesExist(instance)
2102 bf6929a2 Alexander Schreiber
2103 bf6929a2 Alexander Schreiber
    self.instance = instance
2104 bf6929a2 Alexander Schreiber
    self.op.instance_name = instance.name
2105 bf6929a2 Alexander Schreiber
2106 bf6929a2 Alexander Schreiber
  def Exec(self, feedback_fn):
2107 bf6929a2 Alexander Schreiber
    """Reboot the instance.
2108 bf6929a2 Alexander Schreiber

2109 bf6929a2 Alexander Schreiber
    """
2110 bf6929a2 Alexander Schreiber
    instance = self.instance
2111 bf6929a2 Alexander Schreiber
    ignore_secondaries = self.op.ignore_secondaries
2112 bf6929a2 Alexander Schreiber
    reboot_type = self.op.reboot_type
2113 bf6929a2 Alexander Schreiber
    extra_args = getattr(self.op, "extra_args", "")
2114 bf6929a2 Alexander Schreiber
2115 bf6929a2 Alexander Schreiber
    node_current = instance.primary_node
2116 bf6929a2 Alexander Schreiber
2117 bf6929a2 Alexander Schreiber
    if reboot_type not in [constants.INSTANCE_REBOOT_SOFT,
2118 bf6929a2 Alexander Schreiber
                           constants.INSTANCE_REBOOT_HARD,
2119 bf6929a2 Alexander Schreiber
                           constants.INSTANCE_REBOOT_FULL]:
2120 bf6929a2 Alexander Schreiber
      raise errors.ParameterError("reboot type not in [%s, %s, %s]" %
2121 bf6929a2 Alexander Schreiber
                                  (constants.INSTANCE_REBOOT_SOFT,
2122 bf6929a2 Alexander Schreiber
                                   constants.INSTANCE_REBOOT_HARD,
2123 bf6929a2 Alexander Schreiber
                                   constants.INSTANCE_REBOOT_FULL))
2124 bf6929a2 Alexander Schreiber
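    # soft and hard reboots are performed by the hypervisor on the node,
    # while a full reboot is emulated here as a complete shutdown (including
    # the disks) followed by a fresh startup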
2125 bf6929a2 Alexander Schreiber
    if reboot_type in [constants.INSTANCE_REBOOT_SOFT,
2126 bf6929a2 Alexander Schreiber
                       constants.INSTANCE_REBOOT_HARD]:
2127 bf6929a2 Alexander Schreiber
      if not rpc.call_instance_reboot(node_current, instance,
2128 bf6929a2 Alexander Schreiber
                                      reboot_type, extra_args):
2129 bf6929a2 Alexander Schreiber
        raise errors.OpExecError("Could not reboot instance")
2130 bf6929a2 Alexander Schreiber
    else:
2131 bf6929a2 Alexander Schreiber
      if not rpc.call_instance_shutdown(node_current, instance):
2132 bf6929a2 Alexander Schreiber
        raise errors.OpExecError("could not shutdown instance for full reboot")
2133 bf6929a2 Alexander Schreiber
      _ShutdownInstanceDisks(instance, self.cfg)
2134 bf6929a2 Alexander Schreiber
      _StartInstanceDisks(self.cfg, instance, ignore_secondaries)
2135 bf6929a2 Alexander Schreiber
      if not rpc.call_instance_start(node_current, instance, extra_args):
2136 bf6929a2 Alexander Schreiber
        _ShutdownInstanceDisks(instance, self.cfg)
2137 bf6929a2 Alexander Schreiber
        raise errors.OpExecError("Could not start instance for full reboot")
2138 bf6929a2 Alexander Schreiber
2139 bf6929a2 Alexander Schreiber
    self.cfg.MarkInstanceUp(instance.name)
2140 bf6929a2 Alexander Schreiber
2141 bf6929a2 Alexander Schreiber
2142 a8083063 Iustin Pop
class LUShutdownInstance(LogicalUnit):
2143 a8083063 Iustin Pop
  """Shutdown an instance.
2144 a8083063 Iustin Pop

2145 a8083063 Iustin Pop
  """
2146 a8083063 Iustin Pop
  HPATH = "instance-stop"
2147 a8083063 Iustin Pop
  HTYPE = constants.HTYPE_INSTANCE
2148 a8083063 Iustin Pop
  _OP_REQP = ["instance_name"]
2149 a8083063 Iustin Pop
2150 a8083063 Iustin Pop
  def BuildHooksEnv(self):
2151 a8083063 Iustin Pop
    """Build hooks env.
2152 a8083063 Iustin Pop

2153 a8083063 Iustin Pop
    This runs on master, primary and secondary nodes of the instance.
2154 a8083063 Iustin Pop

2155 a8083063 Iustin Pop
    """
2156 396e1b78 Michael Hanselmann
    env = _BuildInstanceHookEnvByObject(self.instance)
2157 880478f8 Iustin Pop
    nl = ([self.sstore.GetMasterNode(), self.instance.primary_node] +
2158 a8083063 Iustin Pop
          list(self.instance.secondary_nodes))
2159 a8083063 Iustin Pop
    return env, nl, nl
2160 a8083063 Iustin Pop
2161 a8083063 Iustin Pop
  def CheckPrereq(self):
2162 a8083063 Iustin Pop
    """Check prerequisites.
2163 a8083063 Iustin Pop

2164 a8083063 Iustin Pop
    This checks that the instance is in the cluster.
2165 a8083063 Iustin Pop

2166 a8083063 Iustin Pop
    """
2167 a8083063 Iustin Pop
    instance = self.cfg.GetInstanceInfo(
2168 a8083063 Iustin Pop
      self.cfg.ExpandInstanceName(self.op.instance_name))
2169 a8083063 Iustin Pop
    if instance is None:
2170 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("Instance '%s' not known" %
2171 3ecf6786 Iustin Pop
                                 self.op.instance_name)
2172 a8083063 Iustin Pop
    self.instance = instance
2173 a8083063 Iustin Pop
2174 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
2175 a8083063 Iustin Pop
    """Shutdown the instance.
2176 a8083063 Iustin Pop

2177 a8083063 Iustin Pop
    """
2178 a8083063 Iustin Pop
    instance = self.instance
2179 a8083063 Iustin Pop
    node_current = instance.primary_node
2180 fe482621 Iustin Pop
    self.cfg.MarkInstanceDown(instance.name)
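    # the instance is marked down first; a failed shutdown is only logged
    # and the disks are deactivated regardless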
2181 a8083063 Iustin Pop
    if not rpc.call_instance_shutdown(node_current, instance):
2182 a8083063 Iustin Pop
      logger.Error("could not shutdown instance")
2183 a8083063 Iustin Pop
2184 a8083063 Iustin Pop
    _ShutdownInstanceDisks(instance, self.cfg)
2185 a8083063 Iustin Pop
2186 a8083063 Iustin Pop
2187 fe7b0351 Michael Hanselmann
class LUReinstallInstance(LogicalUnit):
2188 fe7b0351 Michael Hanselmann
  """Reinstall an instance.
2189 fe7b0351 Michael Hanselmann

2190 fe7b0351 Michael Hanselmann
  """
2191 fe7b0351 Michael Hanselmann
  HPATH = "instance-reinstall"
2192 fe7b0351 Michael Hanselmann
  HTYPE = constants.HTYPE_INSTANCE
2193 fe7b0351 Michael Hanselmann
  _OP_REQP = ["instance_name"]
2194 fe7b0351 Michael Hanselmann
2195 fe7b0351 Michael Hanselmann
  def BuildHooksEnv(self):
2196 fe7b0351 Michael Hanselmann
    """Build hooks env.
2197 fe7b0351 Michael Hanselmann

2198 fe7b0351 Michael Hanselmann
    This runs on master, primary and secondary nodes of the instance.
2199 fe7b0351 Michael Hanselmann

2200 fe7b0351 Michael Hanselmann
    """
2201 396e1b78 Michael Hanselmann
    env = _BuildInstanceHookEnvByObject(self.instance)
2202 fe7b0351 Michael Hanselmann
    nl = ([self.sstore.GetMasterNode(), self.instance.primary_node] +
2203 fe7b0351 Michael Hanselmann
          list(self.instance.secondary_nodes))
2204 fe7b0351 Michael Hanselmann
    return env, nl, nl
2205 fe7b0351 Michael Hanselmann
2206 fe7b0351 Michael Hanselmann
  def CheckPrereq(self):
2207 fe7b0351 Michael Hanselmann
    """Check prerequisites.
2208 fe7b0351 Michael Hanselmann

2209 fe7b0351 Michael Hanselmann
    This checks that the instance is in the cluster and is not running.
2210 fe7b0351 Michael Hanselmann

2211 fe7b0351 Michael Hanselmann
    """
2212 fe7b0351 Michael Hanselmann
    instance = self.cfg.GetInstanceInfo(
2213 fe7b0351 Michael Hanselmann
      self.cfg.ExpandInstanceName(self.op.instance_name))
2214 fe7b0351 Michael Hanselmann
    if instance is None:
2215 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("Instance '%s' not known" %
2216 3ecf6786 Iustin Pop
                                 self.op.instance_name)
2217 fe7b0351 Michael Hanselmann
    if instance.disk_template == constants.DT_DISKLESS:
2218 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("Instance '%s' has no disks" %
2219 3ecf6786 Iustin Pop
                                 self.op.instance_name)
2220 fe7b0351 Michael Hanselmann
    if instance.status != "down":
2221 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("Instance '%s' is marked to be up" %
2222 3ecf6786 Iustin Pop
                                 self.op.instance_name)
2223 fe7b0351 Michael Hanselmann
    remote_info = rpc.call_instance_info(instance.primary_node, instance.name)
2224 fe7b0351 Michael Hanselmann
    if remote_info:
2225 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("Instance '%s' is running on the node %s" %
2226 3ecf6786 Iustin Pop
                                 (self.op.instance_name,
2227 3ecf6786 Iustin Pop
                                  instance.primary_node))
2228 d0834de3 Michael Hanselmann
2229 d0834de3 Michael Hanselmann
    self.op.os_type = getattr(self.op, "os_type", None)
2230 d0834de3 Michael Hanselmann
    if self.op.os_type is not None:
2231 d0834de3 Michael Hanselmann
      # OS verification
2232 d0834de3 Michael Hanselmann
      pnode = self.cfg.GetNodeInfo(
2233 d0834de3 Michael Hanselmann
        self.cfg.ExpandNodeName(instance.primary_node))
2234 d0834de3 Michael Hanselmann
      if pnode is None:
2235 3ecf6786 Iustin Pop
        raise errors.OpPrereqError("Primary node '%s' is unknown" %
2236 3ecf6786 Iustin Pop
                                   instance.primary_node)
2237 00fe9e38 Guido Trotter
      os_obj = rpc.call_os_get(pnode.name, self.op.os_type)
2238 dfa96ded Guido Trotter
      if not os_obj:
2239 3ecf6786 Iustin Pop
        raise errors.OpPrereqError("OS '%s' not in supported OS list for"
2240 3ecf6786 Iustin Pop
                                   " primary node"  % self.op.os_type)
2241 d0834de3 Michael Hanselmann
2242 fe7b0351 Michael Hanselmann
    self.instance = instance
2243 fe7b0351 Michael Hanselmann
2244 fe7b0351 Michael Hanselmann
  def Exec(self, feedback_fn):
2245 fe7b0351 Michael Hanselmann
    """Reinstall the instance.
2246 fe7b0351 Michael Hanselmann

2247 fe7b0351 Michael Hanselmann
    """
2248 fe7b0351 Michael Hanselmann
    inst = self.instance
2249 fe7b0351 Michael Hanselmann
2250 d0834de3 Michael Hanselmann
    if self.op.os_type is not None:
2251 d0834de3 Michael Hanselmann
      feedback_fn("Changing OS to '%s'..." % self.op.os_type)
2252 d0834de3 Michael Hanselmann
      inst.os = self.op.os_type
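      # write the modified instance back so the new OS type is persisted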
2253 d0834de3 Michael Hanselmann
      self.cfg.AddInstance(inst)
2254 d0834de3 Michael Hanselmann
2255 fe7b0351 Michael Hanselmann
    _StartInstanceDisks(self.cfg, inst, None)
2256 fe7b0351 Michael Hanselmann
    try:
2257 fe7b0351 Michael Hanselmann
      feedback_fn("Running the instance OS create scripts...")
2258 fe7b0351 Michael Hanselmann
      if not rpc.call_instance_os_add(inst.primary_node, inst, "sda", "sdb"):
2259 f4bc1f2c Michael Hanselmann
        raise errors.OpExecError("Could not install OS for instance %s"
2260 f4bc1f2c Michael Hanselmann
                                 " on node %s" %
2261 3ecf6786 Iustin Pop
                                 (inst.name, inst.primary_node))
2262 fe7b0351 Michael Hanselmann
    finally:
2263 fe7b0351 Michael Hanselmann
      _ShutdownInstanceDisks(inst, self.cfg)
2264 fe7b0351 Michael Hanselmann
2265 fe7b0351 Michael Hanselmann
2266 decd5f45 Iustin Pop
class LURenameInstance(LogicalUnit):
2267 decd5f45 Iustin Pop
  """Rename an instance.
2268 decd5f45 Iustin Pop

2269 decd5f45 Iustin Pop
  """
2270 decd5f45 Iustin Pop
  HPATH = "instance-rename"
2271 decd5f45 Iustin Pop
  HTYPE = constants.HTYPE_INSTANCE
2272 decd5f45 Iustin Pop
  _OP_REQP = ["instance_name", "new_name"]
2273 decd5f45 Iustin Pop
2274 decd5f45 Iustin Pop
  def BuildHooksEnv(self):
2275 decd5f45 Iustin Pop
    """Build hooks env.
2276 decd5f45 Iustin Pop

2277 decd5f45 Iustin Pop
    This runs on master, primary and secondary nodes of the instance.
2278 decd5f45 Iustin Pop

2279 decd5f45 Iustin Pop
    """
2280 decd5f45 Iustin Pop
    env = _BuildInstanceHookEnvByObject(self.instance)
2281 decd5f45 Iustin Pop
    env["INSTANCE_NEW_NAME"] = self.op.new_name
2282 decd5f45 Iustin Pop
    nl = ([self.sstore.GetMasterNode(), self.instance.primary_node] +
2283 decd5f45 Iustin Pop
          list(self.instance.secondary_nodes))
2284 decd5f45 Iustin Pop
    return env, nl, nl
2285 decd5f45 Iustin Pop
2286 decd5f45 Iustin Pop
  def CheckPrereq(self):
2287 decd5f45 Iustin Pop
    """Check prerequisites.
2288 decd5f45 Iustin Pop

2289 decd5f45 Iustin Pop
    This checks that the instance is in the cluster and is not running.
2290 decd5f45 Iustin Pop

2291 decd5f45 Iustin Pop
    """
2292 decd5f45 Iustin Pop
    instance = self.cfg.GetInstanceInfo(
2293 decd5f45 Iustin Pop
      self.cfg.ExpandInstanceName(self.op.instance_name))
2294 decd5f45 Iustin Pop
    if instance is None:
2295 decd5f45 Iustin Pop
      raise errors.OpPrereqError("Instance '%s' not known" %
2296 decd5f45 Iustin Pop
                                 self.op.instance_name)
2297 decd5f45 Iustin Pop
    if instance.status != "down":
2298 decd5f45 Iustin Pop
      raise errors.OpPrereqError("Instance '%s' is marked to be up" %
2299 decd5f45 Iustin Pop
                                 self.op.instance_name)
2300 decd5f45 Iustin Pop
    remote_info = rpc.call_instance_info(instance.primary_node, instance.name)
2301 decd5f45 Iustin Pop
    if remote_info:
2302 decd5f45 Iustin Pop
      raise errors.OpPrereqError("Instance '%s' is running on the node %s" %
2303 decd5f45 Iustin Pop
                                 (self.op.instance_name,
2304 decd5f45 Iustin Pop
                                  instance.primary_node))
2305 decd5f45 Iustin Pop
    self.instance = instance
2306 decd5f45 Iustin Pop
2307 decd5f45 Iustin Pop
    # new name verification
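    # (resolving the new name also validates it: an unresolvable name raises
    # an error here)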
2308 89e1fc26 Iustin Pop
    name_info = utils.HostInfo(self.op.new_name)
2309 decd5f45 Iustin Pop
2310 89e1fc26 Iustin Pop
    self.op.new_name = new_name = name_info.name
2311 7bde3275 Guido Trotter
    instance_list = self.cfg.GetInstanceList()
2312 7bde3275 Guido Trotter
    if new_name in instance_list:
2313 7bde3275 Guido Trotter
      raise errors.OpPrereqError("Instance '%s' is already in the cluster" %
2314 c09f363f Manuel Franceschini
                                 new_name)
2315 7bde3275 Guido Trotter
2316 decd5f45 Iustin Pop
    if not getattr(self.op, "ignore_ip", False):
2317 937f983d Guido Trotter
      if utils.TcpPing(name_info.ip, constants.DEFAULT_NODED_PORT):
2318 decd5f45 Iustin Pop
        raise errors.OpPrereqError("IP %s of instance %s already in use" %
2319 89e1fc26 Iustin Pop
                                   (name_info.ip, new_name))
2320 decd5f45 Iustin Pop
2321 decd5f45 Iustin Pop
2322 decd5f45 Iustin Pop
  def Exec(self, feedback_fn):
2323 decd5f45 Iustin Pop
    """Reinstall the instance.
2324 decd5f45 Iustin Pop

2325 decd5f45 Iustin Pop
    """
2326 decd5f45 Iustin Pop
    inst = self.instance
2327 decd5f45 Iustin Pop
    old_name = inst.name
2328 decd5f45 Iustin Pop
2329 b23c4333 Manuel Franceschini
    if inst.disk_template == constants.DT_FILE:
2330 b23c4333 Manuel Franceschini
      old_file_storage_dir = os.path.dirname(inst.disks[0].logical_id[1])
2331 b23c4333 Manuel Franceschini
2332 decd5f45 Iustin Pop
    self.cfg.RenameInstance(inst.name, self.op.new_name)
2333 74b5913f Guido Trotter
    # Change the instance lock. This is definitely safe while we hold the BGL
2334 74b5913f Guido Trotter
    self.context.glm.remove(locking.LEVEL_INSTANCE, inst.name)
2335 74b5913f Guido Trotter
    self.context.glm.add(locking.LEVEL_INSTANCE, self.op.new_name)
2336 decd5f45 Iustin Pop
2337 decd5f45 Iustin Pop
    # re-read the instance from the configuration after rename
2338 decd5f45 Iustin Pop
    inst = self.cfg.GetInstanceInfo(self.op.new_name)
2339 decd5f45 Iustin Pop
2340 b23c4333 Manuel Franceschini
    if inst.disk_template == constants.DT_FILE:
2341 b23c4333 Manuel Franceschini
      new_file_storage_dir = os.path.dirname(inst.disks[0].logical_id[1])
2342 b23c4333 Manuel Franceschini
      result = rpc.call_file_storage_dir_rename(inst.primary_node,
2343 b23c4333 Manuel Franceschini
                                                old_file_storage_dir,
2344 b23c4333 Manuel Franceschini
                                                new_file_storage_dir)
2345 b23c4333 Manuel Franceschini
2346 b23c4333 Manuel Franceschini
      if not result:
2347 b23c4333 Manuel Franceschini
        raise errors.OpExecError("Could not connect to node '%s' to rename"
2348 b23c4333 Manuel Franceschini
                                 " directory '%s' to '%s' (but the instance"
2349 b23c4333 Manuel Franceschini
                                 " has been renamed in Ganeti)" % (
2350 b23c4333 Manuel Franceschini
                                 inst.primary_node, old_file_storage_dir,
2351 b23c4333 Manuel Franceschini
                                 new_file_storage_dir))
2352 b23c4333 Manuel Franceschini
2353 b23c4333 Manuel Franceschini
      if not result[0]:
2354 b23c4333 Manuel Franceschini
        raise errors.OpExecError("Could not rename directory '%s' to '%s'"
2355 b23c4333 Manuel Franceschini
                                 " (but the instance has been renamed in"
2356 b23c4333 Manuel Franceschini
                                 " Ganeti)" % (old_file_storage_dir,
2357 b23c4333 Manuel Franceschini
                                               new_file_storage_dir))
2358 b23c4333 Manuel Franceschini
2359 decd5f45 Iustin Pop
    _StartInstanceDisks(self.cfg, inst, None)
2360 decd5f45 Iustin Pop
    try:
2361 decd5f45 Iustin Pop
      if not rpc.call_instance_run_rename(inst.primary_node, inst, old_name,
2362 decd5f45 Iustin Pop
                                          "sda", "sdb"):
2363 f4bc1f2c Michael Hanselmann
        msg = ("Could run OS rename script for instance %s on node %s (but the"
2364 f4bc1f2c Michael Hanselmann
               " instance has been renamed in Ganeti)" %
2365 decd5f45 Iustin Pop
               (inst.name, inst.primary_node))
2366 decd5f45 Iustin Pop
        logger.Error(msg)
2367 decd5f45 Iustin Pop
    finally:
2368 decd5f45 Iustin Pop
      _ShutdownInstanceDisks(inst, self.cfg)
2369 decd5f45 Iustin Pop
2370 decd5f45 Iustin Pop
2371 a8083063 Iustin Pop
class LURemoveInstance(LogicalUnit):
2372 a8083063 Iustin Pop
  """Remove an instance.
2373 a8083063 Iustin Pop

2374 a8083063 Iustin Pop
  """
2375 a8083063 Iustin Pop
  HPATH = "instance-remove"
2376 a8083063 Iustin Pop
  HTYPE = constants.HTYPE_INSTANCE
2377 5c54b832 Iustin Pop
  _OP_REQP = ["instance_name", "ignore_failures"]
2378 a8083063 Iustin Pop
2379 a8083063 Iustin Pop
  def BuildHooksEnv(self):
2380 a8083063 Iustin Pop
    """Build hooks env.
2381 a8083063 Iustin Pop

2382 a8083063 Iustin Pop
    This runs on master, primary and secondary nodes of the instance.
2383 a8083063 Iustin Pop

2384 a8083063 Iustin Pop
    """
2385 396e1b78 Michael Hanselmann
    env = _BuildInstanceHookEnvByObject(self.instance)
2386 1d67656e Iustin Pop
    nl = [self.sstore.GetMasterNode()]
2387 a8083063 Iustin Pop
    return env, nl, nl
2388 a8083063 Iustin Pop
2389 a8083063 Iustin Pop
  def CheckPrereq(self):
2390 a8083063 Iustin Pop
    """Check prerequisites.
2391 a8083063 Iustin Pop

2392 a8083063 Iustin Pop
    This checks that the instance is in the cluster.
2393 a8083063 Iustin Pop

2394 a8083063 Iustin Pop
    """
2395 a8083063 Iustin Pop
    instance = self.cfg.GetInstanceInfo(
2396 a8083063 Iustin Pop
      self.cfg.ExpandInstanceName(self.op.instance_name))
2397 a8083063 Iustin Pop
    if instance is None:
2398 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("Instance '%s' not known" %
2399 3ecf6786 Iustin Pop
                                 self.op.instance_name)
2400 a8083063 Iustin Pop
    self.instance = instance
2401 a8083063 Iustin Pop
2402 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
2403 a8083063 Iustin Pop
    """Remove the instance.
2404 a8083063 Iustin Pop

2405 a8083063 Iustin Pop
    """
2406 a8083063 Iustin Pop
    instance = self.instance
2407 a8083063 Iustin Pop
    logger.Info("shutting down instance %s on node %s" %
2408 a8083063 Iustin Pop
                (instance.name, instance.primary_node))
2409 a8083063 Iustin Pop
2410 a8083063 Iustin Pop
    if not rpc.call_instance_shutdown(instance.primary_node, instance):
2411 1d67656e Iustin Pop
      if self.op.ignore_failures:
2412 1d67656e Iustin Pop
        feedback_fn("Warning: can't shutdown instance")
2413 1d67656e Iustin Pop
      else:
2414 1d67656e Iustin Pop
        raise errors.OpExecError("Could not shutdown instance %s on node %s" %
2415 1d67656e Iustin Pop
                                 (instance.name, instance.primary_node))
2416 a8083063 Iustin Pop
2417 a8083063 Iustin Pop
    logger.Info("removing block devices for instance %s" % instance.name)
2418 a8083063 Iustin Pop
2419 1d67656e Iustin Pop
    if not _RemoveDisks(instance, self.cfg):
2420 1d67656e Iustin Pop
      if self.op.ignore_failures:
2421 1d67656e Iustin Pop
        feedback_fn("Warning: can't remove instance's disks")
2422 1d67656e Iustin Pop
      else:
2423 1d67656e Iustin Pop
        raise errors.OpExecError("Can't remove instance's disks")
2424 a8083063 Iustin Pop
2425 a8083063 Iustin Pop
    logger.Info("removing instance %s out of cluster config" % instance.name)
2426 a8083063 Iustin Pop
2427 a8083063 Iustin Pop
    self.cfg.RemoveInstance(instance.name)
2428 a2fd9afc Guido Trotter
    # Remove the new instance from the Ganeti Lock Manager
2429 a2fd9afc Guido Trotter
    self.context.glm.remove(locking.LEVEL_INSTANCE, instance.name)
2430 a8083063 Iustin Pop
2431 a8083063 Iustin Pop
2432 a8083063 Iustin Pop
class LUQueryInstances(NoHooksLU):
2433 a8083063 Iustin Pop
  """Logical unit for querying instances.
2434 a8083063 Iustin Pop

2435 a8083063 Iustin Pop
  """
2436 069dcc86 Iustin Pop
  _OP_REQP = ["output_fields", "names"]
2437 a8083063 Iustin Pop
2438 a8083063 Iustin Pop
  def CheckPrereq(self):
2439 a8083063 Iustin Pop
    """Check prerequisites.
2440 a8083063 Iustin Pop

2441 a8083063 Iustin Pop
    This checks that the fields required are valid output fields.
2442 a8083063 Iustin Pop

2443 a8083063 Iustin Pop
    """
2444 d8052456 Iustin Pop
    self.dynamic_fields = frozenset(["oper_state", "oper_ram", "status"])
2445 dcb93971 Michael Hanselmann
    _CheckOutputFields(static=["name", "os", "pnode", "snodes",
2446 dcb93971 Michael Hanselmann
                               "admin_state", "admin_ram",
2447 644eeef9 Iustin Pop
                               "disk_template", "ip", "mac", "bridge",
2448 130a6a6f Iustin Pop
                               "sda_size", "sdb_size", "vcpus", "tags"],
2449 dcb93971 Michael Hanselmann
                       dynamic=self.dynamic_fields,
2450 dcb93971 Michael Hanselmann
                       selected=self.op.output_fields)
2451 a8083063 Iustin Pop
2452 069dcc86 Iustin Pop
    self.wanted = _GetWantedInstances(self, self.op.names)
2453 069dcc86 Iustin Pop
2454 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
2455 a8083063 Iustin Pop
    """Computes the list of nodes and their attributes.
2456 a8083063 Iustin Pop

2457 a8083063 Iustin Pop
    """
2458 069dcc86 Iustin Pop
    instance_names = self.wanted
2459 a8083063 Iustin Pop
    instance_list = [self.cfg.GetInstanceInfo(iname) for iname
2460 a8083063 Iustin Pop
                     in instance_names]
2461 a8083063 Iustin Pop
2462 a8083063 Iustin Pop
    # begin data gathering
2463 a8083063 Iustin Pop
2464 a8083063 Iustin Pop
    nodes = frozenset([inst.primary_node for inst in instance_list])
2465 a8083063 Iustin Pop
2466 a8083063 Iustin Pop
    bad_nodes = []
2467 a8083063 Iustin Pop
    if self.dynamic_fields.intersection(self.op.output_fields):
2468 a8083063 Iustin Pop
      live_data = {}
2469 a8083063 Iustin Pop
      node_data = rpc.call_all_instances_info(nodes)
2470 a8083063 Iustin Pop
      for name in nodes:
2471 a8083063 Iustin Pop
        result = node_data[name]
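        # result is a dict of running instances (possibly empty) or False if
        # the node could not be contacted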
2472 a8083063 Iustin Pop
        if result:
2473 a8083063 Iustin Pop
          live_data.update(result)
2474 a8083063 Iustin Pop
        elif result == False:
2475 a8083063 Iustin Pop
          bad_nodes.append(name)
2476 a8083063 Iustin Pop
        # else no instance is alive
2477 a8083063 Iustin Pop
    else:
2478 a8083063 Iustin Pop
      live_data = dict([(name, {}) for name in instance_names])
2479 a8083063 Iustin Pop
2480 a8083063 Iustin Pop
    # end data gathering
2481 a8083063 Iustin Pop
2482 a8083063 Iustin Pop
    output = []
2483 a8083063 Iustin Pop
    for instance in instance_list:
2484 a8083063 Iustin Pop
      iout = []
2485 a8083063 Iustin Pop
      for field in self.op.output_fields:
2486 a8083063 Iustin Pop
        if field == "name":
2487 a8083063 Iustin Pop
          val = instance.name
2488 a8083063 Iustin Pop
        elif field == "os":
2489 a8083063 Iustin Pop
          val = instance.os
2490 a8083063 Iustin Pop
        elif field == "pnode":
2491 a8083063 Iustin Pop
          val = instance.primary_node
2492 a8083063 Iustin Pop
        elif field == "snodes":
2493 8a23d2d3 Iustin Pop
          val = list(instance.secondary_nodes)
2494 a8083063 Iustin Pop
        elif field == "admin_state":
2495 8a23d2d3 Iustin Pop
          val = (instance.status != "down")
2496 a8083063 Iustin Pop
        elif field == "oper_state":
2497 a8083063 Iustin Pop
          if instance.primary_node in bad_nodes:
2498 8a23d2d3 Iustin Pop
            val = None
2499 a8083063 Iustin Pop
          else:
2500 8a23d2d3 Iustin Pop
            val = bool(live_data.get(instance.name))
2501 d8052456 Iustin Pop
        elif field == "status":
2502 d8052456 Iustin Pop
          if instance.primary_node in bad_nodes:
2503 d8052456 Iustin Pop
            val = "ERROR_nodedown"
2504 d8052456 Iustin Pop
          else:
2505 d8052456 Iustin Pop
            running = bool(live_data.get(instance.name))
2506 d8052456 Iustin Pop
            if running:
2507 d8052456 Iustin Pop
              if instance.status != "down":
2508 d8052456 Iustin Pop
                val = "running"
2509 d8052456 Iustin Pop
              else:
2510 d8052456 Iustin Pop
                val = "ERROR_up"
2511 d8052456 Iustin Pop
            else:
2512 d8052456 Iustin Pop
              if instance.status != "down":
2513 d8052456 Iustin Pop
                val = "ERROR_down"
2514 d8052456 Iustin Pop
              else:
2515 d8052456 Iustin Pop
                val = "ADMIN_down"
2516 a8083063 Iustin Pop
        elif field == "admin_ram":
2517 a8083063 Iustin Pop
          val = instance.memory
2518 a8083063 Iustin Pop
        elif field == "oper_ram":
2519 a8083063 Iustin Pop
          if instance.primary_node in bad_nodes:
2520 8a23d2d3 Iustin Pop
            val = None
2521 a8083063 Iustin Pop
          elif instance.name in live_data:
2522 a8083063 Iustin Pop
            val = live_data[instance.name].get("memory", "?")
2523 a8083063 Iustin Pop
          else:
2524 a8083063 Iustin Pop
            val = "-"
2525 a8083063 Iustin Pop
        elif field == "disk_template":
2526 a8083063 Iustin Pop
          val = instance.disk_template
2527 a8083063 Iustin Pop
        elif field == "ip":
2528 a8083063 Iustin Pop
          val = instance.nics[0].ip
2529 a8083063 Iustin Pop
        elif field == "bridge":
2530 a8083063 Iustin Pop
          val = instance.nics[0].bridge
2531 a8083063 Iustin Pop
        elif field == "mac":
2532 a8083063 Iustin Pop
          val = instance.nics[0].mac
2533 644eeef9 Iustin Pop
        elif field == "sda_size" or field == "sdb_size":
2534 644eeef9 Iustin Pop
          disk = instance.FindDisk(field[:3])
2535 644eeef9 Iustin Pop
          if disk is None:
2536 8a23d2d3 Iustin Pop
            val = None
2537 644eeef9 Iustin Pop
          else:
2538 644eeef9 Iustin Pop
            val = disk.size
2539 d6d415e8 Iustin Pop
        elif field == "vcpus":
2540 d6d415e8 Iustin Pop
          val = instance.vcpus
2541 130a6a6f Iustin Pop
        elif field == "tags":
2542 130a6a6f Iustin Pop
          val = list(instance.GetTags())
2543 a8083063 Iustin Pop
        else:
2544 3ecf6786 Iustin Pop
          raise errors.ParameterError(field)
2545 a8083063 Iustin Pop
        iout.append(val)
2546 a8083063 Iustin Pop
      output.append(iout)
2547 a8083063 Iustin Pop
2548 a8083063 Iustin Pop
    return output
2549 a8083063 Iustin Pop
2550 a8083063 Iustin Pop
2551 a8083063 Iustin Pop
class LUFailoverInstance(LogicalUnit):
2552 a8083063 Iustin Pop
  """Failover an instance.
2553 a8083063 Iustin Pop

2554 a8083063 Iustin Pop
  """
2555 a8083063 Iustin Pop
  HPATH = "instance-failover"
2556 a8083063 Iustin Pop
  HTYPE = constants.HTYPE_INSTANCE
2557 a8083063 Iustin Pop
  _OP_REQP = ["instance_name", "ignore_consistency"]
2558 a8083063 Iustin Pop
2559 a8083063 Iustin Pop
  def BuildHooksEnv(self):
2560 a8083063 Iustin Pop
    """Build hooks env.
2561 a8083063 Iustin Pop

2562 a8083063 Iustin Pop
    This runs on master, primary and secondary nodes of the instance.
2563 a8083063 Iustin Pop

2564 a8083063 Iustin Pop
    """
2565 a8083063 Iustin Pop
    env = {
2566 a8083063 Iustin Pop
      "IGNORE_CONSISTENCY": self.op.ignore_consistency,
2567 a8083063 Iustin Pop
      }
2568 396e1b78 Michael Hanselmann
    env.update(_BuildInstanceHookEnvByObject(self.instance))
2569 880478f8 Iustin Pop
    nl = [self.sstore.GetMasterNode()] + list(self.instance.secondary_nodes)
2570 a8083063 Iustin Pop
    return env, nl, nl
2571 a8083063 Iustin Pop
2572 a8083063 Iustin Pop
  def CheckPrereq(self):
2573 a8083063 Iustin Pop
    """Check prerequisites.
2574 a8083063 Iustin Pop

2575 a8083063 Iustin Pop
    This checks that the instance is in the cluster.
2576 a8083063 Iustin Pop

2577 a8083063 Iustin Pop
    """
2578 a8083063 Iustin Pop
    instance = self.cfg.GetInstanceInfo(
2579 a8083063 Iustin Pop
      self.cfg.ExpandInstanceName(self.op.instance_name))
2580 a8083063 Iustin Pop
    if instance is None:
2581 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("Instance '%s' not known" %
2582 3ecf6786 Iustin Pop
                                 self.op.instance_name)
2583 a8083063 Iustin Pop
2584 a1f445d3 Iustin Pop
    if instance.disk_template not in constants.DTS_NET_MIRROR:
2585 2a710df1 Michael Hanselmann
      raise errors.OpPrereqError("Instance's disk layout is not"
2586 a1f445d3 Iustin Pop
                                 " network mirrored, cannot failover.")
2587 2a710df1 Michael Hanselmann
2588 2a710df1 Michael Hanselmann
    secondary_nodes = instance.secondary_nodes
2589 2a710df1 Michael Hanselmann
    if not secondary_nodes:
2590 2a710df1 Michael Hanselmann
      raise errors.ProgrammerError("no secondary node but using "
2591 abdf0113 Iustin Pop
                                   "a mirrored disk template")
2592 2a710df1 Michael Hanselmann
2593 2a710df1 Michael Hanselmann
    target_node = secondary_nodes[0]
2594 d4f16fd9 Iustin Pop
    # check memory requirements on the secondary node
2595 d4f16fd9 Iustin Pop
    _CheckNodeFreeMemory(self.cfg, target_node, "failing over instance %s" %
2596 d4f16fd9 Iustin Pop
                         instance.name, instance.memory)
2597 3a7c308e Guido Trotter
2598 a8083063 Iustin Pop
    # check bridge existence
2599 a8083063 Iustin Pop
    brlist = [nic.bridge for nic in instance.nics]
2600 50ff9a7a Iustin Pop
    if not rpc.call_bridges_exist(target_node, brlist):
2601 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("One or more target bridges %s does not"
2602 3ecf6786 Iustin Pop
                                 " exist on destination node '%s'" %
2603 50ff9a7a Iustin Pop
                                 (brlist, target_node))
2604 a8083063 Iustin Pop
2605 a8083063 Iustin Pop
    self.instance = instance
2606 a8083063 Iustin Pop
2607 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
2608 a8083063 Iustin Pop
    """Failover an instance.
2609 a8083063 Iustin Pop

2610 a8083063 Iustin Pop
    The failover is done by shutting it down on its present node and
2611 a8083063 Iustin Pop
    starting it on the secondary.
2612 a8083063 Iustin Pop

2613 a8083063 Iustin Pop
    """
2614 a8083063 Iustin Pop
    instance = self.instance
2615 a8083063 Iustin Pop
2616 a8083063 Iustin Pop
    source_node = instance.primary_node
2617 a8083063 Iustin Pop
    target_node = instance.secondary_nodes[0]
2618 a8083063 Iustin Pop
2619 a8083063 Iustin Pop
    feedback_fn("* checking disk consistency between source and target")
2620 a8083063 Iustin Pop
    for dev in instance.disks:
2621 abdf0113 Iustin Pop
      # for drbd, these are drbd over lvm
2622 a8083063 Iustin Pop
      if not _CheckDiskConsistency(self.cfg, dev, target_node, False):
2623 a0aaa0d0 Guido Trotter
        if instance.status == "up" and not self.op.ignore_consistency:
2624 3ecf6786 Iustin Pop
          raise errors.OpExecError("Disk %s is degraded on target node,"
2625 3ecf6786 Iustin Pop
                                   " aborting failover." % dev.iv_name)
2626 a8083063 Iustin Pop
2627 a8083063 Iustin Pop
    feedback_fn("* shutting down instance on source node")
2628 a8083063 Iustin Pop
    logger.Info("Shutting down instance %s on node %s" %
2629 a8083063 Iustin Pop
                (instance.name, source_node))
2630 a8083063 Iustin Pop
2631 a8083063 Iustin Pop
    if not rpc.call_instance_shutdown(source_node, instance):
2632 24a40d57 Iustin Pop
      if self.op.ignore_consistency:
2633 24a40d57 Iustin Pop
        logger.Error("Could not shutdown instance %s on node %s. Proceeding"
2634 24a40d57 Iustin Pop
                     " anyway. Please make sure node %s is down"  %
2635 24a40d57 Iustin Pop
                     (instance.name, source_node, source_node))
2636 24a40d57 Iustin Pop
      else:
2637 24a40d57 Iustin Pop
        raise errors.OpExecError("Could not shutdown instance %s on node %s" %
2638 24a40d57 Iustin Pop
                                 (instance.name, source_node))
2639 a8083063 Iustin Pop
2640 a8083063 Iustin Pop
    feedback_fn("* deactivating the instance's disks on source node")
2641 a8083063 Iustin Pop
    if not _ShutdownInstanceDisks(instance, self.cfg, ignore_primary=True):
2642 3ecf6786 Iustin Pop
      raise errors.OpExecError("Can't shut down the instance's disks.")
2643 a8083063 Iustin Pop
2644 a8083063 Iustin Pop
    instance.primary_node = target_node
2645 a8083063 Iustin Pop
    # distribute new instance config to the other nodes
2646 b6102dab Guido Trotter
    self.cfg.Update(instance)
2647 a8083063 Iustin Pop
2648 12a0cfbe Guido Trotter
    # Only start the instance if it's marked as up
2649 12a0cfbe Guido Trotter
    if instance.status == "up":
2650 12a0cfbe Guido Trotter
      feedback_fn("* activating the instance's disks on target node")
2651 12a0cfbe Guido Trotter
      logger.Info("Starting instance %s on node %s" %
2652 12a0cfbe Guido Trotter
                  (instance.name, target_node))
2653 12a0cfbe Guido Trotter
2654 12a0cfbe Guido Trotter
      disks_ok, dummy = _AssembleInstanceDisks(instance, self.cfg,
2655 12a0cfbe Guido Trotter
                                               ignore_secondaries=True)
2656 12a0cfbe Guido Trotter
      if not disks_ok:
2657 12a0cfbe Guido Trotter
        _ShutdownInstanceDisks(instance, self.cfg)
2658 12a0cfbe Guido Trotter
        raise errors.OpExecError("Can't activate the instance's disks")
2659 a8083063 Iustin Pop
2660 12a0cfbe Guido Trotter
      feedback_fn("* starting the instance on the target node")
2661 12a0cfbe Guido Trotter
      if not rpc.call_instance_start(target_node, instance, None):
2662 12a0cfbe Guido Trotter
        _ShutdownInstanceDisks(instance, self.cfg)
2663 12a0cfbe Guido Trotter
        raise errors.OpExecError("Could not start instance %s on node %s." %
2664 12a0cfbe Guido Trotter
                                 (instance.name, target_node))
2665 a8083063 Iustin Pop
2666 a8083063 Iustin Pop
2667 3f78eef2 Iustin Pop
def _CreateBlockDevOnPrimary(cfg, node, instance, device, info):
2668 a8083063 Iustin Pop
  """Create a tree of block devices on the primary node.
2669 a8083063 Iustin Pop

2670 a8083063 Iustin Pop
  This always creates all devices.
2671 a8083063 Iustin Pop

2672 a8083063 Iustin Pop
  """
2673 a8083063 Iustin Pop
  if device.children:
2674 a8083063 Iustin Pop
    for child in device.children:
2675 3f78eef2 Iustin Pop
      if not _CreateBlockDevOnPrimary(cfg, node, instance, child, info):
2676 a8083063 Iustin Pop
        return False
2677 a8083063 Iustin Pop
2678 a8083063 Iustin Pop
  cfg.SetDiskID(device, node)
2679 3f78eef2 Iustin Pop
  new_id = rpc.call_blockdev_create(node, device, device.size,
2680 3f78eef2 Iustin Pop
                                    instance.name, True, info)
2681 a8083063 Iustin Pop
  if not new_id:
2682 a8083063 Iustin Pop
    return False
2683 a8083063 Iustin Pop
  if device.physical_id is None:
2684 a8083063 Iustin Pop
    device.physical_id = new_id
2685 a8083063 Iustin Pop
  return True
2686 a8083063 Iustin Pop
2687 a8083063 Iustin Pop
2688 3f78eef2 Iustin Pop
def _CreateBlockDevOnSecondary(cfg, node, instance, device, force, info):
2689 a8083063 Iustin Pop
  """Create a tree of block devices on a secondary node.
2690 a8083063 Iustin Pop

2691 a8083063 Iustin Pop
  If this device type has to be created on secondaries, create it and
2692 a8083063 Iustin Pop
  all its children.
2693 a8083063 Iustin Pop

2694 a8083063 Iustin Pop
  If not, just recurse to children keeping the same 'force' value.
2695 a8083063 Iustin Pop

2696 a8083063 Iustin Pop
  """
2697 a8083063 Iustin Pop
  if device.CreateOnSecondary():
2698 a8083063 Iustin Pop
    force = True
2699 a8083063 Iustin Pop
  if device.children:
2700 a8083063 Iustin Pop
    for child in device.children:
2701 3f78eef2 Iustin Pop
      if not _CreateBlockDevOnSecondary(cfg, node, instance,
2702 3f78eef2 Iustin Pop
                                        child, force, info):
2703 a8083063 Iustin Pop
        return False
2704 a8083063 Iustin Pop
2705 a8083063 Iustin Pop
  if not force:
2706 a8083063 Iustin Pop
    return True
2707 a8083063 Iustin Pop
  cfg.SetDiskID(device, node)
2708 3f78eef2 Iustin Pop
  new_id = rpc.call_blockdev_create(node, device, device.size,
2709 3f78eef2 Iustin Pop
                                    instance.name, False, info)
2710 a8083063 Iustin Pop
  if not new_id:
2711 a8083063 Iustin Pop
    return False
2712 a8083063 Iustin Pop
  if device.physical_id is None:
2713 a8083063 Iustin Pop
    device.physical_id = new_id
2714 a8083063 Iustin Pop
  return True
2715 a8083063 Iustin Pop
2716 a8083063 Iustin Pop
2717 923b1523 Iustin Pop
def _GenerateUniqueNames(cfg, exts):
2718 923b1523 Iustin Pop
  """Generate a suitable LV name.
2719 923b1523 Iustin Pop

2720 923b1523 Iustin Pop
  This will generate a logical volume name for each of the given extensions.
2721 923b1523 Iustin Pop

2722 923b1523 Iustin Pop
  """
2723 923b1523 Iustin Pop
  results = []
2724 923b1523 Iustin Pop
  for val in exts:
2725 923b1523 Iustin Pop
    new_id = cfg.GenerateUniqueID()
2726 923b1523 Iustin Pop
    results.append("%s%s" % (new_id, val))
2727 923b1523 Iustin Pop
  return results
2728 923b1523 Iustin Pop
2729 923b1523 Iustin Pop
2730 a1f445d3 Iustin Pop
def _GenerateDRBD8Branch(cfg, primary, secondary, size, names, iv_name):
2731 a1f445d3 Iustin Pop
  """Generate a drbd8 device complete with its children.
2732 a1f445d3 Iustin Pop

2733 a1f445d3 Iustin Pop
  """
2734 a1f445d3 Iustin Pop
  port = cfg.AllocatePort()
2735 a1f445d3 Iustin Pop
  vgname = cfg.GetVGName()
2736 a1f445d3 Iustin Pop
  dev_data = objects.Disk(dev_type=constants.LD_LV, size=size,
2737 a1f445d3 Iustin Pop
                          logical_id=(vgname, names[0]))
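  # each DRBD device gets a 128 MiB metadata LV next to its data LV; this is
  # what the extra 256 MiB in _ComputeDiskSize accounts for (128 per device)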
2738 a1f445d3 Iustin Pop
  dev_meta = objects.Disk(dev_type=constants.LD_LV, size=128,
2739 a1f445d3 Iustin Pop
                          logical_id=(vgname, names[1]))
2740 a1f445d3 Iustin Pop
  drbd_dev = objects.Disk(dev_type=constants.LD_DRBD8, size=size,
2741 a1f445d3 Iustin Pop
                          logical_id=(primary, secondary, port),
2742 a1f445d3 Iustin Pop
                          children=[dev_data, dev_meta],
2743 a1f445d3 Iustin Pop
                          iv_name=iv_name)
2744 a1f445d3 Iustin Pop
  return drbd_dev
2745 a1f445d3 Iustin Pop
2746 7c0d6283 Michael Hanselmann
2747 923b1523 Iustin Pop
def _GenerateDiskTemplate(cfg, template_name,
2748 a8083063 Iustin Pop
                          instance_name, primary_node,
2749 0f1a06e3 Manuel Franceschini
                          secondary_nodes, disk_sz, swap_sz,
2750 0f1a06e3 Manuel Franceschini
                          file_storage_dir, file_driver):
2751 a8083063 Iustin Pop
  """Generate the entire disk layout for a given template type.
2752 a8083063 Iustin Pop

2753 a8083063 Iustin Pop
  """
2754 a8083063 Iustin Pop
  #TODO: compute space requirements
2755 a8083063 Iustin Pop
2756 923b1523 Iustin Pop
  vgname = cfg.GetVGName()
2757 3517d9b9 Manuel Franceschini
  if template_name == constants.DT_DISKLESS:
2758 a8083063 Iustin Pop
    disks = []
2759 3517d9b9 Manuel Franceschini
  elif template_name == constants.DT_PLAIN:
2760 a8083063 Iustin Pop
    if len(secondary_nodes) != 0:
2761 a8083063 Iustin Pop
      raise errors.ProgrammerError("Wrong template configuration")
2762 923b1523 Iustin Pop
2763 923b1523 Iustin Pop
    names = _GenerateUniqueNames(cfg, [".sda", ".sdb"])
2764 fe96220b Iustin Pop
    sda_dev = objects.Disk(dev_type=constants.LD_LV, size=disk_sz,
2765 923b1523 Iustin Pop
                           logical_id=(vgname, names[0]),
2766 a8083063 Iustin Pop
                           iv_name = "sda")
2767 fe96220b Iustin Pop
    sdb_dev = objects.Disk(dev_type=constants.LD_LV, size=swap_sz,
2768 923b1523 Iustin Pop
                           logical_id=(vgname, names[1]),
2769 a8083063 Iustin Pop
                           iv_name = "sdb")
2770 a8083063 Iustin Pop
    disks = [sda_dev, sdb_dev]
2771 a1f445d3 Iustin Pop
  elif template_name == constants.DT_DRBD8:
2772 a1f445d3 Iustin Pop
    if len(secondary_nodes) != 1:
2773 a1f445d3 Iustin Pop
      raise errors.ProgrammerError("Wrong template configuration")
2774 a1f445d3 Iustin Pop
    remote_node = secondary_nodes[0]
2775 a1f445d3 Iustin Pop
    names = _GenerateUniqueNames(cfg, [".sda_data", ".sda_meta",
2776 a1f445d3 Iustin Pop
                                       ".sdb_data", ".sdb_meta"])
2777 a1f445d3 Iustin Pop
    drbd_sda_dev = _GenerateDRBD8Branch(cfg, primary_node, remote_node,
2778 a1f445d3 Iustin Pop
                                         disk_sz, names[0:2], "sda")
2779 a1f445d3 Iustin Pop
    drbd_sdb_dev = _GenerateDRBD8Branch(cfg, primary_node, remote_node,
2780 a1f445d3 Iustin Pop
                                         swap_sz, names[2:4], "sdb")
2781 a1f445d3 Iustin Pop
    disks = [drbd_sda_dev, drbd_sdb_dev]
2782 0f1a06e3 Manuel Franceschini
  elif template_name == constants.DT_FILE:
2783 0f1a06e3 Manuel Franceschini
    if len(secondary_nodes) != 0:
2784 0f1a06e3 Manuel Franceschini
      raise errors.ProgrammerError("Wrong template configuration")
2785 0f1a06e3 Manuel Franceschini
2786 0f1a06e3 Manuel Franceschini
    file_sda_dev = objects.Disk(dev_type=constants.LD_FILE, size=disk_sz,
2787 0f1a06e3 Manuel Franceschini
                                iv_name="sda", logical_id=(file_driver,
2788 0f1a06e3 Manuel Franceschini
                                "%s/sda" % file_storage_dir))
2789 0f1a06e3 Manuel Franceschini
    file_sdb_dev = objects.Disk(dev_type=constants.LD_FILE, size=swap_sz,
2790 0f1a06e3 Manuel Franceschini
                                iv_name="sdb", logical_id=(file_driver,
2791 0f1a06e3 Manuel Franceschini
                                "%s/sdb" % file_storage_dir))
2792 0f1a06e3 Manuel Franceschini
    disks = [file_sda_dev, file_sdb_dev]
2793 a8083063 Iustin Pop
  else:
2794 a8083063 Iustin Pop
    raise errors.ProgrammerError("Invalid disk template '%s'" % template_name)
2795 a8083063 Iustin Pop
  return disks
2796 a8083063 Iustin Pop
2797 a8083063 Iustin Pop
2798 a0c3fea1 Michael Hanselmann
def _GetInstanceInfoText(instance):
2799 3ecf6786 Iustin Pop
  """Compute that text that should be added to the disk's metadata.
2800 3ecf6786 Iustin Pop

2801 3ecf6786 Iustin Pop
  """
2802 a0c3fea1 Michael Hanselmann
  return "originstname+%s" % instance.name
2803 a0c3fea1 Michael Hanselmann
2804 a0c3fea1 Michael Hanselmann
2805 a8083063 Iustin Pop
def _CreateDisks(cfg, instance):
2806 a8083063 Iustin Pop
  """Create all disks for an instance.
2807 a8083063 Iustin Pop

2808 a8083063 Iustin Pop
  This abstracts away some work from AddInstance.
2809 a8083063 Iustin Pop

2810 a8083063 Iustin Pop
  Args:
2811 a8083063 Iustin Pop
    instance: the instance object
2812 a8083063 Iustin Pop

2813 a8083063 Iustin Pop
  Returns:
2814 a8083063 Iustin Pop
    True or False showing the success of the creation process
2815 a8083063 Iustin Pop

2816 a8083063 Iustin Pop
  """
2817 a0c3fea1 Michael Hanselmann
  info = _GetInstanceInfoText(instance)
2818 a0c3fea1 Michael Hanselmann
2819 0f1a06e3 Manuel Franceschini
  if instance.disk_template == constants.DT_FILE:
2820 0f1a06e3 Manuel Franceschini
    file_storage_dir = os.path.dirname(instance.disks[0].logical_id[1])
2821 0f1a06e3 Manuel Franceschini
    result = rpc.call_file_storage_dir_create(instance.primary_node,
2822 0f1a06e3 Manuel Franceschini
                                              file_storage_dir)
2823 0f1a06e3 Manuel Franceschini
2824 0f1a06e3 Manuel Franceschini
    if not result:
2825 b62ddbe5 Guido Trotter
      logger.Error("Could not connect to node '%s'" % instance.primary_node)
2826 0f1a06e3 Manuel Franceschini
      return False
2827 0f1a06e3 Manuel Franceschini
2828 0f1a06e3 Manuel Franceschini
    if not result[0]:
2829 0f1a06e3 Manuel Franceschini
      logger.Error("failed to create directory '%s'" % file_storage_dir)
2830 0f1a06e3 Manuel Franceschini
      return False
2831 0f1a06e3 Manuel Franceschini
2832 a8083063 Iustin Pop
  for device in instance.disks:
2833 a8083063 Iustin Pop
    logger.Info("creating volume %s for instance %s" %
2834 1c6e3627 Manuel Franceschini
                (device.iv_name, instance.name))
2835 a8083063 Iustin Pop
    #HARDCODE
2836 a8083063 Iustin Pop
    for secondary_node in instance.secondary_nodes:
2837 3f78eef2 Iustin Pop
      if not _CreateBlockDevOnSecondary(cfg, secondary_node, instance,
2838 3f78eef2 Iustin Pop
                                        device, False, info):
2839 a8083063 Iustin Pop
        logger.Error("failed to create volume %s (%s) on secondary node %s!" %
2840 a8083063 Iustin Pop
                     (device.iv_name, device, secondary_node))
2841 a8083063 Iustin Pop
        return False
2842 a8083063 Iustin Pop
    #HARDCODE
2843 3f78eef2 Iustin Pop
    if not _CreateBlockDevOnPrimary(cfg, instance.primary_node,
2844 3f78eef2 Iustin Pop
                                    instance, device, info):
2845 a8083063 Iustin Pop
      logger.Error("failed to create volume %s on primary!" %
2846 a8083063 Iustin Pop
                   device.iv_name)
2847 a8083063 Iustin Pop
      return False
2848 1c6e3627 Manuel Franceschini
2849 a8083063 Iustin Pop
  return True
2850 a8083063 Iustin Pop
2851 a8083063 Iustin Pop
2852 a8083063 Iustin Pop
def _RemoveDisks(instance, cfg):
2853 a8083063 Iustin Pop
  """Remove all disks for an instance.
2854 a8083063 Iustin Pop

2855 a8083063 Iustin Pop
  This abstracts away some work from `AddInstance()` and
2856 a8083063 Iustin Pop
  `RemoveInstance()`. Note that in case some of the devices couldn't
2857 1d67656e Iustin Pop
  be removed, the removal will continue with the other ones (compare
2858 a8083063 Iustin Pop
  with `_CreateDisks()`).
2859 a8083063 Iustin Pop

2860 a8083063 Iustin Pop
  Args:
2861 a8083063 Iustin Pop
    instance: the instance object
2862 a8083063 Iustin Pop

2863 a8083063 Iustin Pop
  Returns:
2864 a8083063 Iustin Pop
    True or False showing the success of the removal process
2865 a8083063 Iustin Pop

2866 a8083063 Iustin Pop
  """
2867 a8083063 Iustin Pop
  logger.Info("removing block devices for instance %s" % instance.name)
2868 a8083063 Iustin Pop
2869 a8083063 Iustin Pop
  result = True
2870 a8083063 Iustin Pop
  for device in instance.disks:
2871 a8083063 Iustin Pop
    for node, disk in device.ComputeNodeTree(instance.primary_node):
2872 a8083063 Iustin Pop
      cfg.SetDiskID(disk, node)
2873 a8083063 Iustin Pop
      if not rpc.call_blockdev_remove(node, disk):
2874 a8083063 Iustin Pop
        logger.Error("could not remove block device %s on node %s,"
2875 a8083063 Iustin Pop
                     " continuing anyway" %
2876 a8083063 Iustin Pop
                     (device.iv_name, node))
2877 a8083063 Iustin Pop
        result = False
2878 0f1a06e3 Manuel Franceschini
2879 0f1a06e3 Manuel Franceschini
  if instance.disk_template == constants.DT_FILE:
2880 0f1a06e3 Manuel Franceschini
    file_storage_dir = os.path.dirname(instance.disks[0].logical_id[1])
2881 0f1a06e3 Manuel Franceschini
    if not rpc.call_file_storage_dir_remove(instance.primary_node,
2882 0f1a06e3 Manuel Franceschini
                                            file_storage_dir):
2883 0f1a06e3 Manuel Franceschini
      logger.Error("could not remove directory '%s'" % file_storage_dir)
2884 0f1a06e3 Manuel Franceschini
      result = False
2885 0f1a06e3 Manuel Franceschini
2886 a8083063 Iustin Pop
  return result
2887 a8083063 Iustin Pop
2888 a8083063 Iustin Pop
2889 e2fe6369 Iustin Pop
def _ComputeDiskSize(disk_template, disk_size, swap_size):
2890 e2fe6369 Iustin Pop
  """Compute disk size requirements in the volume group
2891 e2fe6369 Iustin Pop

2892 e2fe6369 Iustin Pop
  This is currently hard-coded for the two-drive layout.
2893 e2fe6369 Iustin Pop

2894 e2fe6369 Iustin Pop
  """
2895 e2fe6369 Iustin Pop
  # Required free disk space as a function of disk and swap space
2896 e2fe6369 Iustin Pop
  req_size_dict = {
2897 e2fe6369 Iustin Pop
    constants.DT_DISKLESS: None,
2898 e2fe6369 Iustin Pop
    constants.DT_PLAIN: disk_size + swap_size,
2899 e2fe6369 Iustin Pop
    # 256 MB are added for drbd metadata, 128MB for each drbd device
2900 e2fe6369 Iustin Pop
    constants.DT_DRBD8: disk_size + swap_size + 256,
2901 e2fe6369 Iustin Pop
    constants.DT_FILE: None,
2902 e2fe6369 Iustin Pop
  }
2903 e2fe6369 Iustin Pop
2904 e2fe6369 Iustin Pop
  if disk_template not in req_size_dict:
2905 e2fe6369 Iustin Pop
    raise errors.ProgrammerError("Disk template '%s' size requirement"
2906 e2fe6369 Iustin Pop
                                 " is unknown" %  disk_template)
2907 e2fe6369 Iustin Pop
2908 e2fe6369 Iustin Pop
  return req_size_dict[disk_template]
2909 e2fe6369 Iustin Pop
2910 e2fe6369 Iustin Pop
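# Worked example for _ComputeDiskSize (illustrative numbers only, not used by
# the code): for a 10240 MB data disk and 4096 MB of swap,
#
#   >>> _ComputeDiskSize(constants.DT_DRBD8, 10240, 4096)
#   14592
#   >>> _ComputeDiskSize(constants.DT_PLAIN, 10240, 4096)
#   14336
#
# while diskless and file-based instances need no space in the volume group
# at all (None).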
2911 a8083063 Iustin Pop
class LUCreateInstance(LogicalUnit):
2912 a8083063 Iustin Pop
  """Create an instance.
2913 a8083063 Iustin Pop

2914 a8083063 Iustin Pop
  """
2915 a8083063 Iustin Pop
  HPATH = "instance-add"
2916 a8083063 Iustin Pop
  HTYPE = constants.HTYPE_INSTANCE
2917 538475ca Iustin Pop
  _OP_REQP = ["instance_name", "mem_size", "disk_size",
2918 a8083063 Iustin Pop
              "disk_template", "swap_size", "mode", "start", "vcpus",
2919 1862d460 Alexander Schreiber
              "wait_for_sync", "ip_check", "mac"]
2920 a8083063 Iustin Pop
2921 538475ca Iustin Pop
  def _RunAllocator(self):
2922 538475ca Iustin Pop
    """Run the allocator based on input opcode.
2923 538475ca Iustin Pop

2924 538475ca Iustin Pop
    """
2925 538475ca Iustin Pop
    disks = [{"size": self.op.disk_size, "mode": "w"},
2926 538475ca Iustin Pop
             {"size": self.op.swap_size, "mode": "w"}]
2927 538475ca Iustin Pop
    nics = [{"mac": self.op.mac, "ip": getattr(self.op, "ip", None),
2928 538475ca Iustin Pop
             "bridge": self.op.bridge}]
2929 d1c2dd75 Iustin Pop
    ial = IAllocator(self.cfg, self.sstore,
2930 29859cb7 Iustin Pop
                     mode=constants.IALLOCATOR_MODE_ALLOC,
2931 d1c2dd75 Iustin Pop
                     name=self.op.instance_name,
2932 d1c2dd75 Iustin Pop
                     disk_template=self.op.disk_template,
2933 d1c2dd75 Iustin Pop
                     tags=[],
2934 d1c2dd75 Iustin Pop
                     os=self.op.os_type,
2935 d1c2dd75 Iustin Pop
                     vcpus=self.op.vcpus,
2936 d1c2dd75 Iustin Pop
                     mem_size=self.op.mem_size,
2937 d1c2dd75 Iustin Pop
                     disks=disks,
2938 d1c2dd75 Iustin Pop
                     nics=nics,
2939 29859cb7 Iustin Pop
                     )
2940 d1c2dd75 Iustin Pop
2941 d1c2dd75 Iustin Pop
    ial.Run(self.op.iallocator)
2942 d1c2dd75 Iustin Pop
2943 d1c2dd75 Iustin Pop
    if not ial.success:
2944 538475ca Iustin Pop
      raise errors.OpPrereqError("Can't compute nodes using"
2945 538475ca Iustin Pop
                                 " iallocator '%s': %s" % (self.op.iallocator,
2946 d1c2dd75 Iustin Pop
                                                           ial.info))
2947 27579978 Iustin Pop
    if len(ial.nodes) != ial.required_nodes:
2948 538475ca Iustin Pop
      raise errors.OpPrereqError("iallocator '%s' returned invalid number"
2949 538475ca Iustin Pop
                                 " of nodes (%s), required %s" %
2950 27579978 Iustin Pop
                                 (self.op.iallocator, len(ial.nodes),
                                  ial.required_nodes))
2951 d1c2dd75 Iustin Pop
    self.op.pnode = ial.nodes[0]
2952 538475ca Iustin Pop
    logger.ToStdout("Selected nodes for the instance: %s" %
2953 d1c2dd75 Iustin Pop
                    (", ".join(ial.nodes),))
2954 538475ca Iustin Pop
    logger.Info("Selected nodes for instance %s via iallocator %s: %s" %
2955 d1c2dd75 Iustin Pop
                (self.op.instance_name, self.op.iallocator, ial.nodes))
2956 27579978 Iustin Pop
    if ial.required_nodes == 2:
2957 d1c2dd75 Iustin Pop
      self.op.snode = ial.nodes[1]
2958 538475ca Iustin Pop
2959 a8083063 Iustin Pop
  def BuildHooksEnv(self):
2960 a8083063 Iustin Pop
    """Build hooks env.
2961 a8083063 Iustin Pop

2962 a8083063 Iustin Pop
    This runs on master, primary and secondary nodes of the instance.
2963 a8083063 Iustin Pop

2964 a8083063 Iustin Pop
    """
2965 a8083063 Iustin Pop
    env = {
2966 396e1b78 Michael Hanselmann
      "INSTANCE_DISK_TEMPLATE": self.op.disk_template,
2967 396e1b78 Michael Hanselmann
      "INSTANCE_DISK_SIZE": self.op.disk_size,
2968 396e1b78 Michael Hanselmann
      "INSTANCE_SWAP_SIZE": self.op.swap_size,
2969 a8083063 Iustin Pop
      "INSTANCE_ADD_MODE": self.op.mode,
2970 a8083063 Iustin Pop
      }
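    # At this point env holds the add-specific keys; with illustrative values
    # it would look like:
    #   {"INSTANCE_DISK_TEMPLATE": "drbd", "INSTANCE_DISK_SIZE": 10240,
    #    "INSTANCE_SWAP_SIZE": 4096, "INSTANCE_ADD_MODE": "create"}
    # In import mode the INSTANCE_SRC_* keys are added just below, and the
    # generic per-instance keys are merged in via _BuildInstanceHookEnv.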
2971 a8083063 Iustin Pop
    if self.op.mode == constants.INSTANCE_IMPORT:
2972 396e1b78 Michael Hanselmann
      env["INSTANCE_SRC_NODE"] = self.op.src_node
2973 396e1b78 Michael Hanselmann
      env["INSTANCE_SRC_PATH"] = self.op.src_path
2974 396e1b78 Michael Hanselmann
      env["INSTANCE_SRC_IMAGE"] = self.src_image
2975 396e1b78 Michael Hanselmann
2976 396e1b78 Michael Hanselmann
    env.update(_BuildInstanceHookEnv(name=self.op.instance_name,
2977 396e1b78 Michael Hanselmann
      primary_node=self.op.pnode,
2978 396e1b78 Michael Hanselmann
      secondary_nodes=self.secondaries,
2979 396e1b78 Michael Hanselmann
      status=self.instance_status,
2980 ecb215b5 Michael Hanselmann
      os_type=self.op.os_type,
2981 396e1b78 Michael Hanselmann
      memory=self.op.mem_size,
2982 396e1b78 Michael Hanselmann
      vcpus=self.op.vcpus,
2983 c7b27e9e Iustin Pop
      nics=[(self.inst_ip, self.op.bridge, self.op.mac)],
2984 396e1b78 Michael Hanselmann
    ))
2985 a8083063 Iustin Pop
2986 880478f8 Iustin Pop
    nl = ([self.sstore.GetMasterNode(), self.op.pnode] +
2987 a8083063 Iustin Pop
          self.secondaries)
2988 a8083063 Iustin Pop
    return env, nl, nl
2989 a8083063 Iustin Pop
2990 a8083063 Iustin Pop
2991 a8083063 Iustin Pop
  def CheckPrereq(self):
2992 a8083063 Iustin Pop
    """Check prerequisites.
2993 a8083063 Iustin Pop

2994 a8083063 Iustin Pop
    """
2995 538475ca Iustin Pop
    # set optional parameters to None if they don't exist
2996 538475ca Iustin Pop
    for attr in ["kernel_path", "initrd_path", "hvm_boot_order", "pnode",
2997 31a853d2 Iustin Pop
                 "iallocator", "hvm_acpi", "hvm_pae", "hvm_cdrom_image_path",
2998 31a853d2 Iustin Pop
                 "vnc_bind_address"]:
2999 40ed12dd Guido Trotter
      if not hasattr(self.op, attr):
3000 40ed12dd Guido Trotter
        setattr(self.op, attr, None)
3001 40ed12dd Guido Trotter
3002 a8083063 Iustin Pop
    if self.op.mode not in (constants.INSTANCE_CREATE,
3003 a8083063 Iustin Pop
                            constants.INSTANCE_IMPORT):
3004 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("Invalid instance creation mode '%s'" %
3005 3ecf6786 Iustin Pop
                                 self.op.mode)
3006 a8083063 Iustin Pop
3007 eedc99de Manuel Franceschini
    if (not self.cfg.GetVGName() and
3008 eedc99de Manuel Franceschini
        self.op.disk_template not in constants.DTS_NOT_LVM):
3009 eedc99de Manuel Franceschini
      raise errors.OpPrereqError("Cluster does not support lvm-based"
3010 eedc99de Manuel Franceschini
                                 " instances")
3011 eedc99de Manuel Franceschini
3012 a8083063 Iustin Pop
    if self.op.mode == constants.INSTANCE_IMPORT:
3013 a8083063 Iustin Pop
      src_node = getattr(self.op, "src_node", None)
3014 a8083063 Iustin Pop
      src_path = getattr(self.op, "src_path", None)
3015 a8083063 Iustin Pop
      if src_node is None or src_path is None:
3016 3ecf6786 Iustin Pop
        raise errors.OpPrereqError("Importing an instance requires source"
3017 3ecf6786 Iustin Pop
                                   " node and path options")
3018 a8083063 Iustin Pop
      src_node_full = self.cfg.ExpandNodeName(src_node)
3019 a8083063 Iustin Pop
      if src_node_full is None:
3020 3ecf6786 Iustin Pop
        raise errors.OpPrereqError("Unknown source node '%s'" % src_node)
3021 a8083063 Iustin Pop
      self.op.src_node = src_node = src_node_full
3022 a8083063 Iustin Pop
3023 a8083063 Iustin Pop
      if not os.path.isabs(src_path):
3024 3ecf6786 Iustin Pop
        raise errors.OpPrereqError("The source path must be absolute")
3025 a8083063 Iustin Pop
3026 a8083063 Iustin Pop
      export_info = rpc.call_export_info(src_node, src_path)
3027 a8083063 Iustin Pop
3028 a8083063 Iustin Pop
      if not export_info:
3029 3ecf6786 Iustin Pop
        raise errors.OpPrereqError("No export found in dir %s" % src_path)
3030 a8083063 Iustin Pop
3031 a8083063 Iustin Pop
      if not export_info.has_section(constants.INISECT_EXP):
3032 3ecf6786 Iustin Pop
        raise errors.ProgrammerError("Corrupted export config")
3033 a8083063 Iustin Pop
3034 a8083063 Iustin Pop
      ei_version = export_info.get(constants.INISECT_EXP, 'version')
3035 a8083063 Iustin Pop
      if (int(ei_version) != constants.EXPORT_VERSION):
3036 3ecf6786 Iustin Pop
        raise errors.OpPrereqError("Wrong export version %s (wanted %d)" %
3037 3ecf6786 Iustin Pop
                                   (ei_version, constants.EXPORT_VERSION))
3038 a8083063 Iustin Pop
3039 a8083063 Iustin Pop
      if int(export_info.get(constants.INISECT_INS, 'disk_count')) > 1:
3040 3ecf6786 Iustin Pop
        raise errors.OpPrereqError("Can't import instance with more than"
3041 3ecf6786 Iustin Pop
                                   " one data disk")
3042 a8083063 Iustin Pop
3043 a8083063 Iustin Pop
      # FIXME: are the old os-es, disk sizes, etc. useful?
3044 a8083063 Iustin Pop
      self.op.os_type = export_info.get(constants.INISECT_EXP, 'os')
3045 a8083063 Iustin Pop
      diskimage = os.path.join(src_path, export_info.get(constants.INISECT_INS,
3046 a8083063 Iustin Pop
                                                         'disk0_dump'))
3047 a8083063 Iustin Pop
      self.src_image = diskimage
3048 a8083063 Iustin Pop
    else: # INSTANCE_CREATE
3049 a8083063 Iustin Pop
      if getattr(self.op, "os_type", None) is None:
3050 3ecf6786 Iustin Pop
        raise errors.OpPrereqError("No guest OS specified")
3051 a8083063 Iustin Pop
3052 901a65c1 Iustin Pop
    #### instance parameters check
3053 901a65c1 Iustin Pop
3054 a8083063 Iustin Pop
    # disk template and mirror node verification
3055 a8083063 Iustin Pop
    if self.op.disk_template not in constants.DISK_TEMPLATES:
3056 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("Invalid disk template name")
3057 a8083063 Iustin Pop
3058 901a65c1 Iustin Pop
    # instance name verification
3059 901a65c1 Iustin Pop
    hostname1 = utils.HostInfo(self.op.instance_name)
3060 901a65c1 Iustin Pop
3061 901a65c1 Iustin Pop
    self.op.instance_name = instance_name = hostname1.name
3062 901a65c1 Iustin Pop
    instance_list = self.cfg.GetInstanceList()
3063 901a65c1 Iustin Pop
    if instance_name in instance_list:
3064 901a65c1 Iustin Pop
      raise errors.OpPrereqError("Instance '%s' is already in the cluster" %
3065 901a65c1 Iustin Pop
                                 instance_name)
3066 901a65c1 Iustin Pop
3067 901a65c1 Iustin Pop
    # ip validity checks
3068 901a65c1 Iustin Pop
    ip = getattr(self.op, "ip", None)
3069 901a65c1 Iustin Pop
    if ip is None or ip.lower() == "none":
3070 901a65c1 Iustin Pop
      inst_ip = None
3071 901a65c1 Iustin Pop
    elif ip.lower() == "auto":
3072 901a65c1 Iustin Pop
      inst_ip = hostname1.ip
3073 901a65c1 Iustin Pop
    else:
3074 901a65c1 Iustin Pop
      if not utils.IsValidIP(ip):
3075 901a65c1 Iustin Pop
        raise errors.OpPrereqError("given IP address '%s' doesn't look"
3076 901a65c1 Iustin Pop
                                   " like a valid IP" % ip)
3077 901a65c1 Iustin Pop
      inst_ip = ip
3078 901a65c1 Iustin Pop
    self.inst_ip = self.op.ip = inst_ip
3079 901a65c1 Iustin Pop
3080 901a65c1 Iustin Pop
    if self.op.start and not self.op.ip_check:
3081 901a65c1 Iustin Pop
      raise errors.OpPrereqError("Cannot ignore IP address conflicts when"
3082 901a65c1 Iustin Pop
                                 " adding an instance in start mode")
3083 901a65c1 Iustin Pop
3084 901a65c1 Iustin Pop
    if self.op.ip_check:
3085 901a65c1 Iustin Pop
      if utils.TcpPing(hostname1.ip, constants.DEFAULT_NODED_PORT):
3086 901a65c1 Iustin Pop
        raise errors.OpPrereqError("IP %s of instance %s already in use" %
3087 901a65c1 Iustin Pop
                                   (hostname1.ip, instance_name))
3088 901a65c1 Iustin Pop
3089 901a65c1 Iustin Pop
    # MAC address verification
3090 901a65c1 Iustin Pop
    if self.op.mac != "auto":
3091 901a65c1 Iustin Pop
      if not utils.IsValidMac(self.op.mac.lower()):
3092 901a65c1 Iustin Pop
        raise errors.OpPrereqError("invalid MAC address specified: %s" %
3093 901a65c1 Iustin Pop
                                   self.op.mac)
3094 901a65c1 Iustin Pop
3095 901a65c1 Iustin Pop
    # bridge verification
3096 901a65c1 Iustin Pop
    bridge = getattr(self.op, "bridge", None)
3097 901a65c1 Iustin Pop
    if bridge is None:
3098 901a65c1 Iustin Pop
      self.op.bridge = self.cfg.GetDefBridge()
3099 901a65c1 Iustin Pop
    else:
3100 901a65c1 Iustin Pop
      self.op.bridge = bridge
3101 901a65c1 Iustin Pop
3102 901a65c1 Iustin Pop
    # boot order verification
3103 901a65c1 Iustin Pop
    if self.op.hvm_boot_order is not None:
3104 901a65c1 Iustin Pop
      if len(self.op.hvm_boot_order.strip("acdn")) != 0:
3105 901a65c1 Iustin Pop
        raise errors.OpPrereqError("invalid boot order specified,"
3106 901a65c1 Iustin Pop
                                   " must be one or more of [acdn]")
3107 901a65c1 Iustin Pop
    # file storage checks
3108 0f1a06e3 Manuel Franceschini
    if (self.op.file_driver and
3109 0f1a06e3 Manuel Franceschini
        not self.op.file_driver in constants.FILE_DRIVER):
3110 0f1a06e3 Manuel Franceschini
      raise errors.OpPrereqError("Invalid file driver name '%s'" %
3111 0f1a06e3 Manuel Franceschini
                                 self.op.file_driver)
3112 0f1a06e3 Manuel Franceschini
3113 0f1a06e3 Manuel Franceschini
    if self.op.file_storage_dir and os.path.isabs(self.op.file_storage_dir):
3114 b4de68a9 Iustin Pop
      raise errors.OpPrereqError("File storage directory not a relative"
3115 b4de68a9 Iustin Pop
                                 " path")
3116 538475ca Iustin Pop
    #### allocator run
3117 538475ca Iustin Pop
3118 538475ca Iustin Pop
    if [self.op.iallocator, self.op.pnode].count(None) != 1:
3119 538475ca Iustin Pop
      raise errors.OpPrereqError("One and only one of iallocator and primary"
3120 538475ca Iustin Pop
                                 " node must be given")
3121 538475ca Iustin Pop
3122 538475ca Iustin Pop
    if self.op.iallocator is not None:
3123 538475ca Iustin Pop
      self._RunAllocator()
3124 0f1a06e3 Manuel Franceschini
3125 901a65c1 Iustin Pop
    #### node related checks
3126 901a65c1 Iustin Pop
3127 901a65c1 Iustin Pop
    # check primary node
3128 901a65c1 Iustin Pop
    pnode = self.cfg.GetNodeInfo(self.cfg.ExpandNodeName(self.op.pnode))
3129 901a65c1 Iustin Pop
    if pnode is None:
3130 901a65c1 Iustin Pop
      raise errors.OpPrereqError("Primary node '%s' is unknown" %
3131 901a65c1 Iustin Pop
                                 self.op.pnode)
3132 901a65c1 Iustin Pop
    self.op.pnode = pnode.name
3133 901a65c1 Iustin Pop
    self.pnode = pnode
3134 901a65c1 Iustin Pop
    self.secondaries = []
3135 901a65c1 Iustin Pop
3136 901a65c1 Iustin Pop
    # mirror node verification
3137 a1f445d3 Iustin Pop
    if self.op.disk_template in constants.DTS_NET_MIRROR:
3138 a8083063 Iustin Pop
      if getattr(self.op, "snode", None) is None:
3139 a1f445d3 Iustin Pop
        raise errors.OpPrereqError("The networked disk templates need"
3140 3ecf6786 Iustin Pop
                                   " a mirror node")
3141 a8083063 Iustin Pop
3142 a8083063 Iustin Pop
      snode_name = self.cfg.ExpandNodeName(self.op.snode)
3143 a8083063 Iustin Pop
      if snode_name is None:
3144 3ecf6786 Iustin Pop
        raise errors.OpPrereqError("Unknown secondary node '%s'" %
3145 3ecf6786 Iustin Pop
                                   self.op.snode)
3146 a8083063 Iustin Pop
      elif snode_name == pnode.name:
3147 3ecf6786 Iustin Pop
        raise errors.OpPrereqError("The secondary node cannot be"
3148 3ecf6786 Iustin Pop
                                   " the primary node.")
3149 a8083063 Iustin Pop
      self.secondaries.append(snode_name)
3150 a8083063 Iustin Pop
3151 e2fe6369 Iustin Pop
    req_size = _ComputeDiskSize(self.op.disk_template,
3152 e2fe6369 Iustin Pop
                                self.op.disk_size, self.op.swap_size)
3153 ed1ebc60 Guido Trotter
3154 8d75db10 Iustin Pop
    # Check lv size requirements
3155 8d75db10 Iustin Pop
    if req_size is not None:
3156 8d75db10 Iustin Pop
      nodenames = [pnode.name] + self.secondaries
3157 8d75db10 Iustin Pop
      nodeinfo = rpc.call_node_info(nodenames, self.cfg.GetVGName())
3158 8d75db10 Iustin Pop
      for node in nodenames:
3159 8d75db10 Iustin Pop
        info = nodeinfo.get(node, None)
3160 8d75db10 Iustin Pop
        if not info:
3161 8d75db10 Iustin Pop
          raise errors.OpPrereqError("Cannot get current information"
3162 3e91897b Iustin Pop
                                     " from node '%s'" % node)
3163 8d75db10 Iustin Pop
        vg_free = info.get('vg_free', None)
3164 8d75db10 Iustin Pop
        if not isinstance(vg_free, int):
3165 8d75db10 Iustin Pop
          raise errors.OpPrereqError("Can't compute free disk space on"
3166 8d75db10 Iustin Pop
                                     " node %s" % node)
3167 8d75db10 Iustin Pop
        if req_size > vg_free:
3168 8d75db10 Iustin Pop
          raise errors.OpPrereqError("Not enough disk space on target node %s."
3169 8d75db10 Iustin Pop
                                     " %d MB available, %d MB required" %
3170 8d75db10 Iustin Pop
                                     (node, vg_free, req_size))
3171 ed1ebc60 Guido Trotter
3172 a8083063 Iustin Pop
    # os verification
3173 00fe9e38 Guido Trotter
    os_obj = rpc.call_os_get(pnode.name, self.op.os_type)
3174 dfa96ded Guido Trotter
    if not os_obj:
3175 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("OS '%s' not in supported os list for"
3176 3ecf6786 Iustin Pop
                                 " primary node"  % self.op.os_type)
3177 a8083063 Iustin Pop
3178 3b6d8c9b Iustin Pop
    if self.op.kernel_path == constants.VALUE_NONE:
3179 3b6d8c9b Iustin Pop
      raise errors.OpPrereqError("Can't set instance kernel to none")
3180 3b6d8c9b Iustin Pop
3181 a8083063 Iustin Pop
3182 901a65c1 Iustin Pop
    # bridge check on primary node
3183 a8083063 Iustin Pop
    if not rpc.call_bridges_exist(self.pnode.name, [self.op.bridge]):
3184 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("target bridge '%s' does not exist on"
3185 3ecf6786 Iustin Pop
                                 " destination node '%s'" %
3186 3ecf6786 Iustin Pop
                                 (self.op.bridge, pnode.name))
3187 a8083063 Iustin Pop
3188 49ce1563 Iustin Pop
    # memory check on primary node
3189 49ce1563 Iustin Pop
    if self.op.start:
3190 49ce1563 Iustin Pop
      _CheckNodeFreeMemory(self.cfg, self.pnode.name,
3191 49ce1563 Iustin Pop
                           "creating instance %s" % self.op.instance_name,
3192 49ce1563 Iustin Pop
                           self.op.mem_size)
3193 49ce1563 Iustin Pop
3194 31a853d2 Iustin Pop
    # hvm_cdrom_image_path verification
3195 31a853d2 Iustin Pop
    if self.op.hvm_cdrom_image_path is not None:
3196 31a853d2 Iustin Pop
      if not os.path.isabs(self.op.hvm_cdrom_image_path):
3197 31a853d2 Iustin Pop
        raise errors.OpPrereqError("The path to the HVM CDROM image must"
3198 31a853d2 Iustin Pop
                                   " be an absolute path or None, not %s" %
3199 31a853d2 Iustin Pop
                                   self.op.hvm_cdrom_image_path)
3200 31a853d2 Iustin Pop
      if not os.path.isfile(self.op.hvm_cdrom_image_path):
3201 31a853d2 Iustin Pop
        raise errors.OpPrereqError("The HVM CDROM image must either be a"
3202 31a853d2 Iustin Pop
                                   " regular file or a symlink pointing to"
3203 31a853d2 Iustin Pop
                                   " an existing regular file, not %s" %
3204 31a853d2 Iustin Pop
                                   self.op.hvm_cdrom_image_path)
3205 31a853d2 Iustin Pop
3206 31a853d2 Iustin Pop
    # vnc_bind_address verification
3207 31a853d2 Iustin Pop
    if self.op.vnc_bind_address is not None:
3208 31a853d2 Iustin Pop
      if not utils.IsValidIP(self.op.vnc_bind_address):
3209 31a853d2 Iustin Pop
        raise errors.OpPrereqError("given VNC bind address '%s' doesn't look"
3210 31a853d2 Iustin Pop
                                   " like a valid IP address" %
3211 31a853d2 Iustin Pop
                                   self.op.vnc_bind_address)
3212 31a853d2 Iustin Pop
3213 a8083063 Iustin Pop
    if self.op.start:
3214 a8083063 Iustin Pop
      self.instance_status = 'up'
3215 a8083063 Iustin Pop
    else:
3216 a8083063 Iustin Pop
      self.instance_status = 'down'
3217 a8083063 Iustin Pop
3218 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
3219 a8083063 Iustin Pop
    """Create and add the instance to the cluster.
3220 a8083063 Iustin Pop

3221 a8083063 Iustin Pop
    """
3222 a8083063 Iustin Pop
    instance = self.op.instance_name
3223 a8083063 Iustin Pop
    pnode_name = self.pnode.name
3224 a8083063 Iustin Pop
3225 1862d460 Alexander Schreiber
    if self.op.mac == "auto":
3226 ba4b62cf Iustin Pop
      mac_address = self.cfg.GenerateMAC()
3227 1862d460 Alexander Schreiber
    else:
3228 ba4b62cf Iustin Pop
      mac_address = self.op.mac
3229 1862d460 Alexander Schreiber
3230 1862d460 Alexander Schreiber
    nic = objects.NIC(bridge=self.op.bridge, mac=mac_address)
3231 a8083063 Iustin Pop
    if self.inst_ip is not None:
3232 a8083063 Iustin Pop
      nic.ip = self.inst_ip
3233 a8083063 Iustin Pop
3234 2a6469d5 Alexander Schreiber
    ht_kind = self.sstore.GetHypervisorType()
3235 2a6469d5 Alexander Schreiber
    if ht_kind in constants.HTS_REQ_PORT:
3236 2a6469d5 Alexander Schreiber
      network_port = self.cfg.AllocatePort()
3237 2a6469d5 Alexander Schreiber
    else:
3238 2a6469d5 Alexander Schreiber
      network_port = None
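    # Only hypervisor types listed in constants.HTS_REQ_PORT (those exposing
    # the instance console over the network, e.g. HVM via VNC) get a
    # cluster-unique port reserved here; all others keep network_port=None.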
3239 58acb49d Alexander Schreiber
3240 31a853d2 Iustin Pop
    if self.op.vnc_bind_address is None:
3241 31a853d2 Iustin Pop
      self.op.vnc_bind_address = constants.VNC_DEFAULT_BIND_ADDRESS
3242 31a853d2 Iustin Pop
3243 2c313123 Manuel Franceschini
    # this is needed because os.path.join does not accept None arguments
3244 2c313123 Manuel Franceschini
    if self.op.file_storage_dir is None:
3245 2c313123 Manuel Franceschini
      string_file_storage_dir = ""
3246 2c313123 Manuel Franceschini
    else:
3247 2c313123 Manuel Franceschini
      string_file_storage_dir = self.op.file_storage_dir
3248 2c313123 Manuel Franceschini
3249 0f1a06e3 Manuel Franceschini
    # build the full file storage dir path
3250 0f1a06e3 Manuel Franceschini
    file_storage_dir = os.path.normpath(os.path.join(
3251 0f1a06e3 Manuel Franceschini
                                        self.sstore.GetFileStorageDir(),
3252 2c313123 Manuel Franceschini
                                        string_file_storage_dir, instance))
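    # For example (illustrative paths): with a cluster file storage dir of
    # /srv/ganeti/file-storage, a per-instance dir of "mydir" and instance
    # "inst1.example.com" this yields
    # /srv/ganeti/file-storage/mydir/inst1.example.com; an empty
    # string_file_storage_dir simply drops the middle component.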
3253 0f1a06e3 Manuel Franceschini
3254 0f1a06e3 Manuel Franceschini
3255 923b1523 Iustin Pop
    disks = _GenerateDiskTemplate(self.cfg,
3256 a8083063 Iustin Pop
                                  self.op.disk_template,
3257 a8083063 Iustin Pop
                                  instance, pnode_name,
3258 a8083063 Iustin Pop
                                  self.secondaries, self.op.disk_size,
3259 0f1a06e3 Manuel Franceschini
                                  self.op.swap_size,
3260 0f1a06e3 Manuel Franceschini
                                  file_storage_dir,
3261 0f1a06e3 Manuel Franceschini
                                  self.op.file_driver)
3262 a8083063 Iustin Pop
3263 a8083063 Iustin Pop
    iobj = objects.Instance(name=instance, os=self.op.os_type,
3264 a8083063 Iustin Pop
                            primary_node=pnode_name,
3265 a8083063 Iustin Pop
                            memory=self.op.mem_size,
3266 a8083063 Iustin Pop
                            vcpus=self.op.vcpus,
3267 a8083063 Iustin Pop
                            nics=[nic], disks=disks,
3268 a8083063 Iustin Pop
                            disk_template=self.op.disk_template,
3269 a8083063 Iustin Pop
                            status=self.instance_status,
3270 58acb49d Alexander Schreiber
                            network_port=network_port,
3271 3b6d8c9b Iustin Pop
                            kernel_path=self.op.kernel_path,
3272 3b6d8c9b Iustin Pop
                            initrd_path=self.op.initrd_path,
3273 25c5878d Alexander Schreiber
                            hvm_boot_order=self.op.hvm_boot_order,
3274 31a853d2 Iustin Pop
                            hvm_acpi=self.op.hvm_acpi,
3275 31a853d2 Iustin Pop
                            hvm_pae=self.op.hvm_pae,
3276 31a853d2 Iustin Pop
                            hvm_cdrom_image_path=self.op.hvm_cdrom_image_path,
3277 31a853d2 Iustin Pop
                            vnc_bind_address=self.op.vnc_bind_address,
3278 a8083063 Iustin Pop
                            )
3279 a8083063 Iustin Pop
3280 a8083063 Iustin Pop
    feedback_fn("* creating instance disks...")
3281 a8083063 Iustin Pop
    if not _CreateDisks(self.cfg, iobj):
3282 a8083063 Iustin Pop
      _RemoveDisks(iobj, self.cfg)
3283 3ecf6786 Iustin Pop
      raise errors.OpExecError("Device creation failed, reverting...")
3284 a8083063 Iustin Pop
3285 a8083063 Iustin Pop
    feedback_fn("adding instance %s to cluster config" % instance)
3286 a8083063 Iustin Pop
3287 a8083063 Iustin Pop
    self.cfg.AddInstance(iobj)
3288 a2fd9afc Guido Trotter
    # Add the new instance to the Ganeti Lock Manager
3289 a2fd9afc Guido Trotter
    self.context.glm.add(locking.LEVEL_INSTANCE, instance)
3290 a8083063 Iustin Pop
3291 a8083063 Iustin Pop
    if self.op.wait_for_sync:
3292 5bfac263 Iustin Pop
      disk_abort = not _WaitForSync(self.cfg, iobj, self.proc)
3293 a1f445d3 Iustin Pop
    elif iobj.disk_template in constants.DTS_NET_MIRROR:
3294 a8083063 Iustin Pop
      # make sure the disks are not degraded (still sync-ing is ok)
3295 a8083063 Iustin Pop
      time.sleep(15)
3296 a8083063 Iustin Pop
      feedback_fn("* checking mirrors status")
3297 5bfac263 Iustin Pop
      disk_abort = not _WaitForSync(self.cfg, iobj, self.proc, oneshot=True)
3298 a8083063 Iustin Pop
    else:
3299 a8083063 Iustin Pop
      disk_abort = False
3300 a8083063 Iustin Pop
3301 a8083063 Iustin Pop
    if disk_abort:
3302 a8083063 Iustin Pop
      _RemoveDisks(iobj, self.cfg)
3303 a8083063 Iustin Pop
      self.cfg.RemoveInstance(iobj.name)
3304 a2fd9afc Guido Trotter
      # Remove the new instance from the Ganeti Lock Manager
3305 a2fd9afc Guido Trotter
      self.context.glm.remove(locking.LEVEL_INSTANCE, iobj.name)
3306 3ecf6786 Iustin Pop
      raise errors.OpExecError("There are some degraded disks for"
3307 3ecf6786 Iustin Pop
                               " this instance")
3308 a8083063 Iustin Pop
3309 a8083063 Iustin Pop
    feedback_fn("creating os for instance %s on node %s" %
3310 a8083063 Iustin Pop
                (instance, pnode_name))
3311 a8083063 Iustin Pop
3312 a8083063 Iustin Pop
    if iobj.disk_template != constants.DT_DISKLESS:
3313 a8083063 Iustin Pop
      if self.op.mode == constants.INSTANCE_CREATE:
3314 a8083063 Iustin Pop
        feedback_fn("* running the instance OS create scripts...")
3315 a8083063 Iustin Pop
        if not rpc.call_instance_os_add(pnode_name, iobj, "sda", "sdb"):
3316 3ecf6786 Iustin Pop
          raise errors.OpExecError("could not add os for instance %s"
3317 3ecf6786 Iustin Pop
                                   " on node %s" %
3318 3ecf6786 Iustin Pop
                                   (instance, pnode_name))
3319 a8083063 Iustin Pop
3320 a8083063 Iustin Pop
      elif self.op.mode == constants.INSTANCE_IMPORT:
3321 a8083063 Iustin Pop
        feedback_fn("* running the instance OS import scripts...")
3322 a8083063 Iustin Pop
        src_node = self.op.src_node
3323 a8083063 Iustin Pop
        src_image = self.src_image
3324 a8083063 Iustin Pop
        if not rpc.call_instance_os_import(pnode_name, iobj, "sda", "sdb",
3325 a8083063 Iustin Pop
                                                src_node, src_image):
3326 3ecf6786 Iustin Pop
          raise errors.OpExecError("Could not import os for instance"
3327 3ecf6786 Iustin Pop
                                   " %s on node %s" %
3328 3ecf6786 Iustin Pop
                                   (instance, pnode_name))
3329 a8083063 Iustin Pop
      else:
3330 a8083063 Iustin Pop
        # also checked in the prereq part
3331 3ecf6786 Iustin Pop
        raise errors.ProgrammerError("Unknown OS initialization mode '%s'"
3332 3ecf6786 Iustin Pop
                                     % self.op.mode)
3333 a8083063 Iustin Pop
3334 a8083063 Iustin Pop
    if self.op.start:
3335 a8083063 Iustin Pop
      logger.Info("starting instance %s on node %s" % (instance, pnode_name))
3336 a8083063 Iustin Pop
      feedback_fn("* starting instance...")
3337 a8083063 Iustin Pop
      if not rpc.call_instance_start(pnode_name, iobj, None):
3338 3ecf6786 Iustin Pop
        raise errors.OpExecError("Could not start instance")
3339 a8083063 Iustin Pop
3340 a8083063 Iustin Pop
3341 a8083063 Iustin Pop
class LUConnectConsole(NoHooksLU):
3342 a8083063 Iustin Pop
  """Connect to an instance's console.
3343 a8083063 Iustin Pop

3344 a8083063 Iustin Pop
  This is somewhat special in that it returns the command line that
3345 a8083063 Iustin Pop
  you need to run on the master node in order to connect to the
3346 a8083063 Iustin Pop
  console.
3347 a8083063 Iustin Pop

3348 a8083063 Iustin Pop
  """
3349 a8083063 Iustin Pop
  _OP_REQP = ["instance_name"]
3350 8659b73e Guido Trotter
  REQ_BGL = False
3351 8659b73e Guido Trotter
3352 8659b73e Guido Trotter
  def ExpandNames(self):
3353 8659b73e Guido Trotter
    self._ExpandAndLockInstance()
3354 a8083063 Iustin Pop
3355 a8083063 Iustin Pop
  def CheckPrereq(self):
3356 a8083063 Iustin Pop
    """Check prerequisites.
3357 a8083063 Iustin Pop

3358 a8083063 Iustin Pop
    This checks that the instance is in the cluster.
3359 a8083063 Iustin Pop

3360 a8083063 Iustin Pop
    """
3361 8659b73e Guido Trotter
    self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
3362 8659b73e Guido Trotter
    assert self.instance is not None, \
3363 8659b73e Guido Trotter
      "Cannot retrieve locked instance %s" % self.op.instance_name
3364 a8083063 Iustin Pop
3365 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
3366 a8083063 Iustin Pop
    """Connect to the console of an instance
3367 a8083063 Iustin Pop

3368 a8083063 Iustin Pop
    """
3369 a8083063 Iustin Pop
    instance = self.instance
3370 a8083063 Iustin Pop
    node = instance.primary_node
3371 a8083063 Iustin Pop
3372 a8083063 Iustin Pop
    node_insts = rpc.call_instance_list([node])[node]
3373 a8083063 Iustin Pop
    if node_insts is False:
3374 3ecf6786 Iustin Pop
      raise errors.OpExecError("Can't connect to node %s." % node)
3375 a8083063 Iustin Pop
3376 a8083063 Iustin Pop
    if instance.name not in node_insts:
3377 3ecf6786 Iustin Pop
      raise errors.OpExecError("Instance %s is not running." % instance.name)
3378 a8083063 Iustin Pop
3379 a8083063 Iustin Pop
    logger.Debug("connecting to console of %s on %s" % (instance.name, node))
3380 a8083063 Iustin Pop
3381 a8083063 Iustin Pop
    hyper = hypervisor.GetHypervisor()
3382 30989e69 Alexander Schreiber
    console_cmd = hyper.GetShellCommandForConsole(instance)
3383 b047857b Michael Hanselmann
3384 82122173 Iustin Pop
    # build ssh cmdline
3385 0a80a26f Michael Hanselmann
    return self.ssh.BuildCmd(node, "root", console_cmd, batch=True, tty=True)
3386 a8083063 Iustin Pop
3387 a8083063 Iustin Pop
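# Note on LUConnectConsole.Exec above: the returned command is not executed
# here but handed back to the client, which runs it on the master node; it is
# roughly an "ssh -t root@<primary node> '<hypervisor console command>'"
# invocation (illustrative form only, the exact arguments come from
# self.ssh.BuildCmd).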
3388 a8083063 Iustin Pop
class LUReplaceDisks(LogicalUnit):
3389 a8083063 Iustin Pop
  """Replace the disks of an instance.
3390 a8083063 Iustin Pop

3391 a8083063 Iustin Pop
  """
3392 a8083063 Iustin Pop
  HPATH = "mirrors-replace"
3393 a8083063 Iustin Pop
  HTYPE = constants.HTYPE_INSTANCE
3394 a9e0c397 Iustin Pop
  _OP_REQP = ["instance_name", "mode", "disks"]
3395 a8083063 Iustin Pop
3396 b6e82a65 Iustin Pop
  def _RunAllocator(self):
3397 b6e82a65 Iustin Pop
    """Compute a new secondary node using an IAllocator.
3398 b6e82a65 Iustin Pop

3399 b6e82a65 Iustin Pop
    """
3400 b6e82a65 Iustin Pop
    ial = IAllocator(self.cfg, self.sstore,
3401 b6e82a65 Iustin Pop
                     mode=constants.IALLOCATOR_MODE_RELOC,
3402 b6e82a65 Iustin Pop
                     name=self.op.instance_name,
3403 b6e82a65 Iustin Pop
                     relocate_from=[self.sec_node])
3404 b6e82a65 Iustin Pop
3405 b6e82a65 Iustin Pop
    ial.Run(self.op.iallocator)
3406 b6e82a65 Iustin Pop
3407 b6e82a65 Iustin Pop
    if not ial.success:
3408 b6e82a65 Iustin Pop
      raise errors.OpPrereqError("Can't compute nodes using"
3409 b6e82a65 Iustin Pop
                                 " iallocator '%s': %s" % (self.op.iallocator,
3410 b6e82a65 Iustin Pop
                                                           ial.info))
3411 b6e82a65 Iustin Pop
    if len(ial.nodes) != ial.required_nodes:
3412 b6e82a65 Iustin Pop
      raise errors.OpPrereqError("iallocator '%s' returned invalid number"
3413 b6e82a65 Iustin Pop
                                 " of nodes (%s), required %s" %
3414 b6e82a65 Iustin Pop
                                 (self.op.iallocator, len(ial.nodes),
                                  ial.required_nodes))
3415 b6e82a65 Iustin Pop
    self.op.remote_node = ial.nodes[0]
3416 b6e82a65 Iustin Pop
    logger.ToStdout("Selected new secondary for the instance: %s" %
3417 b6e82a65 Iustin Pop
                    self.op.remote_node)
3418 b6e82a65 Iustin Pop
3419 a8083063 Iustin Pop
  def BuildHooksEnv(self):
3420 a8083063 Iustin Pop
    """Build hooks env.
3421 a8083063 Iustin Pop

3422 a8083063 Iustin Pop
    This runs on the master, the primary and all the secondaries.
3423 a8083063 Iustin Pop

3424 a8083063 Iustin Pop
    """
3425 a8083063 Iustin Pop
    env = {
3426 a9e0c397 Iustin Pop
      "MODE": self.op.mode,
3427 a8083063 Iustin Pop
      "NEW_SECONDARY": self.op.remote_node,
3428 a8083063 Iustin Pop
      "OLD_SECONDARY": self.instance.secondary_nodes[0],
3429 a8083063 Iustin Pop
      }
3430 396e1b78 Michael Hanselmann
    env.update(_BuildInstanceHookEnvByObject(self.instance))
3431 0834c866 Iustin Pop
    nl = [
3432 0834c866 Iustin Pop
      self.sstore.GetMasterNode(),
3433 0834c866 Iustin Pop
      self.instance.primary_node,
3434 0834c866 Iustin Pop
      ]
3435 0834c866 Iustin Pop
    if self.op.remote_node is not None:
3436 0834c866 Iustin Pop
      nl.append(self.op.remote_node)
3437 a8083063 Iustin Pop
    return env, nl, nl
3438 a8083063 Iustin Pop
3439 a8083063 Iustin Pop
  def CheckPrereq(self):
3440 a8083063 Iustin Pop
    """Check prerequisites.
3441 a8083063 Iustin Pop

3442 a8083063 Iustin Pop
    This checks that the instance is in the cluster.
3443 a8083063 Iustin Pop

3444 a8083063 Iustin Pop
    """
3445 b6e82a65 Iustin Pop
    if not hasattr(self.op, "remote_node"):
3446 b6e82a65 Iustin Pop
      self.op.remote_node = None
3447 b6e82a65 Iustin Pop
3448 a8083063 Iustin Pop
    instance = self.cfg.GetInstanceInfo(
3449 a8083063 Iustin Pop
      self.cfg.ExpandInstanceName(self.op.instance_name))
3450 a8083063 Iustin Pop
    if instance is None:
3451 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("Instance '%s' not known" %
3452 3ecf6786 Iustin Pop
                                 self.op.instance_name)
3453 a8083063 Iustin Pop
    self.instance = instance
3454 7df43a76 Iustin Pop
    self.op.instance_name = instance.name
3455 a8083063 Iustin Pop
3456 a9e0c397 Iustin Pop
    if instance.disk_template not in constants.DTS_NET_MIRROR:
3457 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("Instance's disk layout is not"
3458 a9e0c397 Iustin Pop
                                 " network mirrored.")
3459 a8083063 Iustin Pop
3460 a8083063 Iustin Pop
    if len(instance.secondary_nodes) != 1:
3461 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("The instance has a strange layout,"
3462 3ecf6786 Iustin Pop
                                 " expected one secondary but found %d" %
3463 3ecf6786 Iustin Pop
                                 len(instance.secondary_nodes))
3464 a8083063 Iustin Pop
3465 a9e0c397 Iustin Pop
    self.sec_node = instance.secondary_nodes[0]
3466 a9e0c397 Iustin Pop
3467 b6e82a65 Iustin Pop
    ia_name = getattr(self.op, "iallocator", None)
3468 b6e82a65 Iustin Pop
    if ia_name is not None:
3469 b6e82a65 Iustin Pop
      if self.op.remote_node is not None:
3470 b6e82a65 Iustin Pop
        raise errors.OpPrereqError("Give either the iallocator or the new"
3471 b6e82a65 Iustin Pop
                                   " secondary, not both")
3472 b6e82a65 Iustin Pop
      self.op.remote_node = self._RunAllocator()
3473 b6e82a65 Iustin Pop
3474 b6e82a65 Iustin Pop
    remote_node = self.op.remote_node
3475 a9e0c397 Iustin Pop
    if remote_node is not None:
3476 a8083063 Iustin Pop
      remote_node = self.cfg.ExpandNodeName(remote_node)
3477 a8083063 Iustin Pop
      if remote_node is None:
3478 3ecf6786 Iustin Pop
        raise errors.OpPrereqError("Node '%s' not known" %
3479 3ecf6786 Iustin Pop
                                   self.op.remote_node)
3480 a9e0c397 Iustin Pop
      self.remote_node_info = self.cfg.GetNodeInfo(remote_node)
3481 a9e0c397 Iustin Pop
    else:
3482 a9e0c397 Iustin Pop
      self.remote_node_info = None
3483 a8083063 Iustin Pop
    if remote_node == instance.primary_node:
3484 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("The specified node is the primary node of"
3485 3ecf6786 Iustin Pop
                                 " the instance.")
3486 a9e0c397 Iustin Pop
    elif remote_node == self.sec_node:
3487 0834c866 Iustin Pop
      if self.op.mode == constants.REPLACE_DISK_SEC:
3488 0834c866 Iustin Pop
        # this is for DRBD8, where we can't execute the same mode of
3489 0834c866 Iustin Pop
        # replacement as for drbd7 (no different port allocated)
3490 0834c866 Iustin Pop
        raise errors.OpPrereqError("Same secondary given, cannot execute"
3491 0834c866 Iustin Pop
                                   " replacement")
3492 a9e0c397 Iustin Pop
    if instance.disk_template == constants.DT_DRBD8:
3493 7df43a76 Iustin Pop
      if (self.op.mode == constants.REPLACE_DISK_ALL and
3494 7df43a76 Iustin Pop
          remote_node is not None):
3495 7df43a76 Iustin Pop
        # switch to replace secondary mode
3496 7df43a76 Iustin Pop
        self.op.mode = constants.REPLACE_DISK_SEC
3497 7df43a76 Iustin Pop
3498 a9e0c397 Iustin Pop
      if self.op.mode == constants.REPLACE_DISK_ALL:
3499 12c3449a Michael Hanselmann
        raise errors.OpPrereqError("Template 'drbd' only allows primary or"
3500 a9e0c397 Iustin Pop
                                   " secondary disk replacement, not"
3501 a9e0c397 Iustin Pop
                                   " both at once")
3502 a9e0c397 Iustin Pop
      elif self.op.mode == constants.REPLACE_DISK_PRI:
3503 a9e0c397 Iustin Pop
        if remote_node is not None:
3504 12c3449a Michael Hanselmann
          raise errors.OpPrereqError("Template 'drbd' does not allow changing"
3505 a9e0c397 Iustin Pop
                                     " the secondary while doing a primary"
3506 a9e0c397 Iustin Pop
                                     " node disk replacement")
3507 a9e0c397 Iustin Pop
        self.tgt_node = instance.primary_node
3508 cff90b79 Iustin Pop
        self.oth_node = instance.secondary_nodes[0]
3509 a9e0c397 Iustin Pop
      elif self.op.mode == constants.REPLACE_DISK_SEC:
3510 a9e0c397 Iustin Pop
        self.new_node = remote_node # this can be None, in which case
3511 a9e0c397 Iustin Pop
                                    # we don't change the secondary
3512 a9e0c397 Iustin Pop
        self.tgt_node = instance.secondary_nodes[0]
3513 cff90b79 Iustin Pop
        self.oth_node = instance.primary_node
3514 a9e0c397 Iustin Pop
      else:
3515 a9e0c397 Iustin Pop
        raise errors.ProgrammerError("Unhandled disk replace mode")
3516 a9e0c397 Iustin Pop
3517 a9e0c397 Iustin Pop
    for name in self.op.disks:
3518 a9e0c397 Iustin Pop
      if instance.FindDisk(name) is None:
3519 a9e0c397 Iustin Pop
        raise errors.OpPrereqError("Disk '%s' not found for instance '%s'" %
3520 a9e0c397 Iustin Pop
                                   (name, instance.name))
3521 a8083063 Iustin Pop
    self.op.remote_node = remote_node
3522 a8083063 Iustin Pop
3523 a9e0c397 Iustin Pop
  def _ExecD8DiskOnly(self, feedback_fn):
3524 a9e0c397 Iustin Pop
    """Replace a disk on the primary or secondary for dbrd8.
3525 a9e0c397 Iustin Pop

3526 a9e0c397 Iustin Pop
    The algorithm for replace is quite complicated:
3527 a9e0c397 Iustin Pop
      - for each disk to be replaced:
3528 a9e0c397 Iustin Pop
        - create new LVs on the target node with unique names
3529 a9e0c397 Iustin Pop
        - detach old LVs from the drbd device
3530 a9e0c397 Iustin Pop
        - rename old LVs to name_replaced-<time_t>
3531 a9e0c397 Iustin Pop
        - rename new LVs to old LVs
3532 a9e0c397 Iustin Pop
        - attach the new LVs (with the old names now) to the drbd device
3533 a9e0c397 Iustin Pop
      - wait for sync across all devices
3534 a9e0c397 Iustin Pop
      - for each modified disk:
3535 a9e0c397 Iustin Pop
        - remove old LVs (which have the name name_replaced-<time_t>)
3536 a9e0c397 Iustin Pop

3537 a9e0c397 Iustin Pop
    Failures are not very well handled.
3538 cff90b79 Iustin Pop

3539 a9e0c397 Iustin Pop
    """
3540 cff90b79 Iustin Pop
    steps_total = 6
3541 5bfac263 Iustin Pop
    warning, info = (self.proc.LogWarning, self.proc.LogInfo)
3542 a9e0c397 Iustin Pop
    instance = self.instance
3543 a9e0c397 Iustin Pop
    iv_names = {}
3544 a9e0c397 Iustin Pop
    vgname = self.cfg.GetVGName()
3545 a9e0c397 Iustin Pop
    # start of work
3546 a9e0c397 Iustin Pop
    cfg = self.cfg
3547 a9e0c397 Iustin Pop
    tgt_node = self.tgt_node
3548 cff90b79 Iustin Pop
    oth_node = self.oth_node
3549 cff90b79 Iustin Pop
3550 cff90b79 Iustin Pop
    # Step: check device activation
3551 5bfac263 Iustin Pop
    self.proc.LogStep(1, steps_total, "check device existence")
3552 cff90b79 Iustin Pop
    info("checking volume groups")
3553 cff90b79 Iustin Pop
    my_vg = cfg.GetVGName()
3554 cff90b79 Iustin Pop
    results = rpc.call_vg_list([oth_node, tgt_node])
3555 cff90b79 Iustin Pop
    if not results:
3556 cff90b79 Iustin Pop
      raise errors.OpExecError("Can't list volume groups on the nodes")
3557 cff90b79 Iustin Pop
    for node in oth_node, tgt_node:
3558 cff90b79 Iustin Pop
      res = results.get(node, False)
3559 cff90b79 Iustin Pop
      if not res or my_vg not in res:
3560 cff90b79 Iustin Pop
        raise errors.OpExecError("Volume group '%s' not found on %s" %
3561 cff90b79 Iustin Pop
                                 (my_vg, node))
3562 cff90b79 Iustin Pop
    for dev in instance.disks:
3563 cff90b79 Iustin Pop
      if not dev.iv_name in self.op.disks:
3564 cff90b79 Iustin Pop
        continue
3565 cff90b79 Iustin Pop
      for node in tgt_node, oth_node:
3566 cff90b79 Iustin Pop
        info("checking %s on %s" % (dev.iv_name, node))
3567 cff90b79 Iustin Pop
        cfg.SetDiskID(dev, node)
3568 cff90b79 Iustin Pop
        if not rpc.call_blockdev_find(node, dev):
3569 cff90b79 Iustin Pop
          raise errors.OpExecError("Can't find device %s on node %s" %
3570 cff90b79 Iustin Pop
                                   (dev.iv_name, node))
3571 cff90b79 Iustin Pop
3572 cff90b79 Iustin Pop
    # Step: check other node consistency
3573 5bfac263 Iustin Pop
    self.proc.LogStep(2, steps_total, "check peer consistency")
3574 cff90b79 Iustin Pop
    for dev in instance.disks:
3575 cff90b79 Iustin Pop
      if not dev.iv_name in self.op.disks:
3576 cff90b79 Iustin Pop
        continue
3577 cff90b79 Iustin Pop
      info("checking %s consistency on %s" % (dev.iv_name, oth_node))
3578 cff90b79 Iustin Pop
      if not _CheckDiskConsistency(self.cfg, dev, oth_node,
3579 cff90b79 Iustin Pop
                                   oth_node==instance.primary_node):
3580 cff90b79 Iustin Pop
        raise errors.OpExecError("Peer node (%s) has degraded storage, unsafe"
3581 cff90b79 Iustin Pop
                                 " to replace disks on this node (%s)" %
3582 cff90b79 Iustin Pop
                                 (oth_node, tgt_node))
3583 cff90b79 Iustin Pop
3584 cff90b79 Iustin Pop
    # Step: create new storage
3585 5bfac263 Iustin Pop
    self.proc.LogStep(3, steps_total, "allocate new storage")
3586 a9e0c397 Iustin Pop
    for dev in instance.disks:
3587 a9e0c397 Iustin Pop
      if not dev.iv_name in self.op.disks:
3588 a9e0c397 Iustin Pop
        continue
3589 a9e0c397 Iustin Pop
      size = dev.size
3590 a9e0c397 Iustin Pop
      cfg.SetDiskID(dev, tgt_node)
3591 a9e0c397 Iustin Pop
      lv_names = [".%s_%s" % (dev.iv_name, suf) for suf in ["data", "meta"]]
3592 a9e0c397 Iustin Pop
      names = _GenerateUniqueNames(cfg, lv_names)
3593 a9e0c397 Iustin Pop
      lv_data = objects.Disk(dev_type=constants.LD_LV, size=size,
3594 a9e0c397 Iustin Pop
                             logical_id=(vgname, names[0]))
3595 a9e0c397 Iustin Pop
      lv_meta = objects.Disk(dev_type=constants.LD_LV, size=128,
3596 a9e0c397 Iustin Pop
                             logical_id=(vgname, names[1]))
3597 a9e0c397 Iustin Pop
      new_lvs = [lv_data, lv_meta]
3598 a9e0c397 Iustin Pop
      old_lvs = dev.children
3599 a9e0c397 Iustin Pop
      iv_names[dev.iv_name] = (dev, old_lvs, new_lvs)
3600 cff90b79 Iustin Pop
      info("creating new local storage on %s for %s" %
3601 cff90b79 Iustin Pop
           (tgt_node, dev.iv_name))
3602 a9e0c397 Iustin Pop
      # since we *always* want to create this LV, we use the
3603 a9e0c397 Iustin Pop
      # _Create...OnPrimary (which forces the creation), even if we
3604 a9e0c397 Iustin Pop
      # are talking about the secondary node
3605 a9e0c397 Iustin Pop
      for new_lv in new_lvs:
3606 3f78eef2 Iustin Pop
        if not _CreateBlockDevOnPrimary(cfg, tgt_node, instance, new_lv,
3607 a9e0c397 Iustin Pop
                                        _GetInstanceInfoText(instance)):
3608 a9e0c397 Iustin Pop
          raise errors.OpExecError("Failed to create new LV named '%s' on"
3609 a9e0c397 Iustin Pop
                                   " node '%s'" %
3610 a9e0c397 Iustin Pop
                                   (new_lv.logical_id[1], tgt_node))
3611 a9e0c397 Iustin Pop
3612 cff90b79 Iustin Pop
    # Step: for each lv, detach+rename*2+attach
3613 5bfac263 Iustin Pop
    self.proc.LogStep(4, steps_total, "change drbd configuration")
3614 cff90b79 Iustin Pop
    for dev, old_lvs, new_lvs in iv_names.itervalues():
3615 cff90b79 Iustin Pop
      info("detaching %s drbd from local storage" % dev.iv_name)
3616 a9e0c397 Iustin Pop
      if not rpc.call_blockdev_removechildren(tgt_node, dev, old_lvs):
3617 a9e0c397 Iustin Pop
        raise errors.OpExecError("Can't detach drbd from local storage on node"
3618 a9e0c397 Iustin Pop
                                 " %s for device %s" % (tgt_node, dev.iv_name))
3619 cff90b79 Iustin Pop
      #dev.children = []
3620 cff90b79 Iustin Pop
      #cfg.Update(instance)
3621 a9e0c397 Iustin Pop
3622 a9e0c397 Iustin Pop
      # ok, we created the new LVs, so now we know we have the needed
3623 a9e0c397 Iustin Pop
      # storage; as such, we proceed on the target node to rename
3624 a9e0c397 Iustin Pop
      # old_lv to _old, and new_lv to old_lv; note that we rename LVs
3625 c99a3cc0 Manuel Franceschini
      # using the assumption that logical_id == physical_id (which in
3626 a9e0c397 Iustin Pop
      # turn is the unique_id on that node)
3627 cff90b79 Iustin Pop
3628 cff90b79 Iustin Pop
      # FIXME(iustin): use a better name for the replaced LVs
3629 a9e0c397 Iustin Pop
      temp_suffix = int(time.time())
3630 a9e0c397 Iustin Pop
      ren_fn = lambda d, suff: (d.physical_id[0],
3631 a9e0c397 Iustin Pop
                                d.physical_id[1] + "_replaced-%s" % suff)
3632 cff90b79 Iustin Pop
      # build the rename list based on what LVs exist on the node
3633 cff90b79 Iustin Pop
      rlist = []
3634 cff90b79 Iustin Pop
      for to_ren in old_lvs:
3635 cff90b79 Iustin Pop
        find_res = rpc.call_blockdev_find(tgt_node, to_ren)
3636 cff90b79 Iustin Pop
        if find_res is not None: # device exists
3637 cff90b79 Iustin Pop
          rlist.append((to_ren, ren_fn(to_ren, temp_suffix)))
3638 cff90b79 Iustin Pop
3639 cff90b79 Iustin Pop
      info("renaming the old LVs on the target node")
3640 a9e0c397 Iustin Pop
      if not rpc.call_blockdev_rename(tgt_node, rlist):
3641 cff90b79 Iustin Pop
        raise errors.OpExecError("Can't rename old LVs on node %s" % tgt_node)
3642 a9e0c397 Iustin Pop
      # now we rename the new LVs to the old LVs
3643 cff90b79 Iustin Pop
      info("renaming the new LVs on the target node")
3644 a9e0c397 Iustin Pop
      rlist = [(new, old.physical_id) for old, new in zip(old_lvs, new_lvs)]
3645 a9e0c397 Iustin Pop
      if not rpc.call_blockdev_rename(tgt_node, rlist):
3646 cff90b79 Iustin Pop
        raise errors.OpExecError("Can't rename new LVs on node %s" % tgt_node)
3647 cff90b79 Iustin Pop
3648 cff90b79 Iustin Pop
      for old, new in zip(old_lvs, new_lvs):
3649 cff90b79 Iustin Pop
        new.logical_id = old.logical_id
3650 cff90b79 Iustin Pop
        cfg.SetDiskID(new, tgt_node)
3651 a9e0c397 Iustin Pop
3652 cff90b79 Iustin Pop
      for disk in old_lvs:
3653 cff90b79 Iustin Pop
        disk.logical_id = ren_fn(disk, temp_suffix)
3654 cff90b79 Iustin Pop
        cfg.SetDiskID(disk, tgt_node)
3655 a9e0c397 Iustin Pop
3656 a9e0c397 Iustin Pop
      # now that the new lvs have the old name, we can add them to the device
3657 cff90b79 Iustin Pop
      info("adding new mirror component on %s" % tgt_node)
3658 a9e0c397 Iustin Pop
      if not rpc.call_blockdev_addchildren(tgt_node, dev, new_lvs):
3659 a9e0c397 Iustin Pop
        for new_lv in new_lvs:
3660 a9e0c397 Iustin Pop
          if not rpc.call_blockdev_remove(tgt_node, new_lv):
3661 79caa9ed Guido Trotter
            warning("Can't rollback device %s", hint="manually cleanup unused"
3662 cff90b79 Iustin Pop
                    " logical volumes")
3663 cff90b79 Iustin Pop
        raise errors.OpExecError("Can't add local storage to drbd")
3664 a9e0c397 Iustin Pop
3665 a9e0c397 Iustin Pop
      dev.children = new_lvs
3666 a9e0c397 Iustin Pop
      cfg.Update(instance)
3667 a9e0c397 Iustin Pop
3668 cff90b79 Iustin Pop
    # Step: wait for sync
3669 a9e0c397 Iustin Pop
3670 a9e0c397 Iustin Pop
    # this can fail as the old devices are degraded and _WaitForSync
3671 a9e0c397 Iustin Pop
    # does a combined result over all disks, so we don't check its
3672 a9e0c397 Iustin Pop
    # return value
3673 5bfac263 Iustin Pop
    self.proc.LogStep(5, steps_total, "sync devices")
3674 5bfac263 Iustin Pop
    _WaitForSync(cfg, instance, self.proc, unlock=True)
3675 a9e0c397 Iustin Pop
3676 a9e0c397 Iustin Pop
    # so check manually all the devices
3677 a9e0c397 Iustin Pop
    for name, (dev, old_lvs, new_lvs) in iv_names.iteritems():
3678 a9e0c397 Iustin Pop
      cfg.SetDiskID(dev, instance.primary_node)
3679 a9e0c397 Iustin Pop
      is_degr = rpc.call_blockdev_find(instance.primary_node, dev)[5]
3680 a9e0c397 Iustin Pop
      if is_degr:
3681 a9e0c397 Iustin Pop
        raise errors.OpExecError("DRBD device %s is degraded!" % name)
3682 a9e0c397 Iustin Pop
3683 cff90b79 Iustin Pop
    # Step: remove old storage
3684 5bfac263 Iustin Pop
    self.proc.LogStep(6, steps_total, "removing old storage")
3685 a9e0c397 Iustin Pop
    for name, (dev, old_lvs, new_lvs) in iv_names.iteritems():
3686 cff90b79 Iustin Pop
      info("remove logical volumes for %s" % name)
3687 a9e0c397 Iustin Pop
      for lv in old_lvs:
3688 a9e0c397 Iustin Pop
        cfg.SetDiskID(lv, tgt_node)
3689 a9e0c397 Iustin Pop
        if not rpc.call_blockdev_remove(tgt_node, lv):
3690 79caa9ed Guido Trotter
          warning("Can't remove old LV", hint="manually remove unused LVs")
3691 a9e0c397 Iustin Pop
          continue
3692 a9e0c397 Iustin Pop
3693 a9e0c397 Iustin Pop
  def _ExecD8Secondary(self, feedback_fn):
3694 a9e0c397 Iustin Pop
    """Replace the secondary node for drbd8.
3695 a9e0c397 Iustin Pop

3696 a9e0c397 Iustin Pop
    The algorithm for replace is quite complicated:
3697 a9e0c397 Iustin Pop
      - for all disks of the instance:
3698 a9e0c397 Iustin Pop
        - create new LVs on the new node with same names
3699 a9e0c397 Iustin Pop
        - shutdown the drbd device on the old secondary
3700 a9e0c397 Iustin Pop
        - disconnect the drbd network on the primary
3701 a9e0c397 Iustin Pop
        - create the drbd device on the new secondary
3702 a9e0c397 Iustin Pop
        - network attach the drbd on the primary, using an artifice:
3703 a9e0c397 Iustin Pop
          the drbd code for Attach() will connect to the network if it
3704 a9e0c397 Iustin Pop
          finds a device which is connected to the good local disks but
3705 a9e0c397 Iustin Pop
          not network enabled
3706 a9e0c397 Iustin Pop
      - wait for sync across all devices
3707 a9e0c397 Iustin Pop
      - remove all disks from the old secondary
3708 a9e0c397 Iustin Pop

3709 a9e0c397 Iustin Pop
    Failures are not very well handled.
3710 0834c866 Iustin Pop

3711 a9e0c397 Iustin Pop
    """
3712 0834c866 Iustin Pop
    steps_total = 6
3713 5bfac263 Iustin Pop
    warning, info = (self.proc.LogWarning, self.proc.LogInfo)
3714 a9e0c397 Iustin Pop
    instance = self.instance
3715 a9e0c397 Iustin Pop
    iv_names = {}
3716 a9e0c397 Iustin Pop
    vgname = self.cfg.GetVGName()
3717 a9e0c397 Iustin Pop
    # start of work
3718 a9e0c397 Iustin Pop
    cfg = self.cfg
3719 a9e0c397 Iustin Pop
    old_node = self.tgt_node
3720 a9e0c397 Iustin Pop
    new_node = self.new_node
3721 a9e0c397 Iustin Pop
    pri_node = instance.primary_node
3722 0834c866 Iustin Pop
3723 0834c866 Iustin Pop
    # Step: check device activation
3724 5bfac263 Iustin Pop
    self.proc.LogStep(1, steps_total, "check device existence")
3725 0834c866 Iustin Pop
    info("checking volume groups")
3726 0834c866 Iustin Pop
    my_vg = cfg.GetVGName()
3727 0834c866 Iustin Pop
    results = rpc.call_vg_list([pri_node, new_node])
3728 0834c866 Iustin Pop
    if not results:
3729 0834c866 Iustin Pop
      raise errors.OpExecError("Can't list volume groups on the nodes")
3730 0834c866 Iustin Pop
    for node in pri_node, new_node:
3731 0834c866 Iustin Pop
      res = results.get(node, False)
3732 0834c866 Iustin Pop
      if not res or my_vg not in res:
3733 0834c866 Iustin Pop
        raise errors.OpExecError("Volume group '%s' not found on %s" %
3734 0834c866 Iustin Pop
                                 (my_vg, node))
3735 0834c866 Iustin Pop
    for dev in instance.disks:
3736 0834c866 Iustin Pop
      if not dev.iv_name in self.op.disks:
3737 0834c866 Iustin Pop
        continue
3738 0834c866 Iustin Pop
      info("checking %s on %s" % (dev.iv_name, pri_node))
3739 0834c866 Iustin Pop
      cfg.SetDiskID(dev, pri_node)
3740 0834c866 Iustin Pop
      if not rpc.call_blockdev_find(pri_node, dev):
3741 0834c866 Iustin Pop
        raise errors.OpExecError("Can't find device %s on node %s" %
3742 0834c866 Iustin Pop
                                 (dev.iv_name, pri_node))
3743 0834c866 Iustin Pop
3744 0834c866 Iustin Pop
    # Step: check other node consistency
3745 5bfac263 Iustin Pop
    self.proc.LogStep(2, steps_total, "check peer consistency")
3746 0834c866 Iustin Pop
    for dev in instance.disks:
3747 0834c866 Iustin Pop
      if not dev.iv_name in self.op.disks:
3748 0834c866 Iustin Pop
        continue
3749 0834c866 Iustin Pop
      info("checking %s consistency on %s" % (dev.iv_name, pri_node))
3750 0834c866 Iustin Pop
      if not _CheckDiskConsistency(self.cfg, dev, pri_node, True, ldisk=True):
3751 0834c866 Iustin Pop
        raise errors.OpExecError("Primary node (%s) has degraded storage,"
3752 0834c866 Iustin Pop
                                 " unsafe to replace the secondary" %
3753 0834c866 Iustin Pop
                                 pri_node)
3754 0834c866 Iustin Pop
3755 0834c866 Iustin Pop
    # Step: create new storage
3756 5bfac263 Iustin Pop
    self.proc.LogStep(3, steps_total, "allocate new storage")
3757 a9e0c397 Iustin Pop
    for dev in instance.disks:
3758 a9e0c397 Iustin Pop
      size = dev.size
3759 0834c866 Iustin Pop
      info("adding new local storage on %s for %s" % (new_node, dev.iv_name))
3760 a9e0c397 Iustin Pop
      # since we *always* want to create this LV, we use the
3761 a9e0c397 Iustin Pop
      # _Create...OnPrimary (which forces the creation), even if we
3762 a9e0c397 Iustin Pop
      # are talking about the secondary node
3763 a9e0c397 Iustin Pop
      for new_lv in dev.children:
3764 3f78eef2 Iustin Pop
        if not _CreateBlockDevOnPrimary(cfg, new_node, instance, new_lv,
3765 a9e0c397 Iustin Pop
                                        _GetInstanceInfoText(instance)):
3766 a9e0c397 Iustin Pop
          raise errors.OpExecError("Failed to create new LV named '%s' on"
3767 a9e0c397 Iustin Pop
                                   " node '%s'" %
3768 a9e0c397 Iustin Pop
                                   (new_lv.logical_id[1], new_node))
3769 a9e0c397 Iustin Pop
3770 0834c866 Iustin Pop
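      # remember the device and its LV children (still present on the old
      # secondary) for the sync check and the final cleanup below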
      iv_names[dev.iv_name] = (dev, dev.children)
3771 0834c866 Iustin Pop
3772 5bfac263 Iustin Pop
    self.proc.LogStep(4, steps_total, "changing drbd configuration")
3773 0834c866 Iustin Pop
    for dev in instance.disks:
3774 0834c866 Iustin Pop
      size = dev.size
3775 0834c866 Iustin Pop
      info("activating a new drbd on %s for %s" % (new_node, dev.iv_name))
3776 a9e0c397 Iustin Pop
      # create new devices on new_node
3777 a9e0c397 Iustin Pop
      new_drbd = objects.Disk(dev_type=constants.LD_DRBD8,
3778 a9e0c397 Iustin Pop
                              logical_id=(pri_node, new_node,
3779 a9e0c397 Iustin Pop
                                          dev.logical_id[2]),
3780 a9e0c397 Iustin Pop
                              children=dev.children)
3781 3f78eef2 Iustin Pop
      if not _CreateBlockDevOnSecondary(cfg, new_node, instance,
3782 3f78eef2 Iustin Pop
                                        new_drbd, False,
3783 a9e0c397 Iustin Pop
                                      _GetInstanceInfoText(instance)):
3784 a9e0c397 Iustin Pop
        raise errors.OpExecError("Failed to create new DRBD on"
3785 a9e0c397 Iustin Pop
                                 " node '%s'" % new_node)
3786 a9e0c397 Iustin Pop
3787 0834c866 Iustin Pop
    for dev in instance.disks:
3788 a9e0c397 Iustin Pop
      # we have new devices, shutdown the drbd on the old secondary
3789 0834c866 Iustin Pop
      info("shutting down drbd for %s on old node" % dev.iv_name)
3790 a9e0c397 Iustin Pop
      cfg.SetDiskID(dev, old_node)
3791 a9e0c397 Iustin Pop
      if not rpc.call_blockdev_shutdown(old_node, dev):
3792 0834c866 Iustin Pop
        warning("Failed to shutdown drbd for %s on old node" % dev.iv_name,
3793 79caa9ed Guido Trotter
                hint="Please cleanup this device manually as soon as possible")
3794 a9e0c397 Iustin Pop
3795 642445d9 Iustin Pop
    info("detaching primary drbds from the network (=> standalone)")
3796 642445d9 Iustin Pop
    done = 0
3797 642445d9 Iustin Pop
    for dev in instance.disks:
3798 a9e0c397 Iustin Pop
      cfg.SetDiskID(dev, pri_node)
3799 642445d9 Iustin Pop
      # set the physical (unique in bdev terms) id to None, meaning
3800 642445d9 Iustin Pop
      # detach from network
3801 642445d9 Iustin Pop
      dev.physical_id = (None,) * len(dev.physical_id)
3802 642445d9 Iustin Pop
      # and 'find' the device, which will 'fix' it to match the
3803 642445d9 Iustin Pop
      # standalone state
3804 642445d9 Iustin Pop
      if rpc.call_blockdev_find(pri_node, dev):
3805 642445d9 Iustin Pop
        done += 1
3806 642445d9 Iustin Pop
      else:
3807 642445d9 Iustin Pop
        warning("Failed to detach drbd %s from network, unusual case" %
3808 642445d9 Iustin Pop
                dev.iv_name)
3809 642445d9 Iustin Pop
3810 642445d9 Iustin Pop
    if not done:
3811 642445d9 Iustin Pop
      # no detaches succeeded (very unlikely)
3812 642445d9 Iustin Pop
      raise errors.OpExecError("Can't detach at least one DRBD from old node")
3813 642445d9 Iustin Pop
3814 642445d9 Iustin Pop
    # if we managed to detach at least one, we update all the disks of
3815 642445d9 Iustin Pop
    # the instance to point to the new secondary
3816 642445d9 Iustin Pop
    info("updating instance configuration")
3817 642445d9 Iustin Pop
    for dev in instance.disks:
3818 642445d9 Iustin Pop
      dev.logical_id = (pri_node, new_node) + dev.logical_id[2:]
3819 a9e0c397 Iustin Pop
      cfg.SetDiskID(dev, pri_node)
3820 642445d9 Iustin Pop
    cfg.Update(instance)
3821 a9e0c397 Iustin Pop
3822 642445d9 Iustin Pop
    # and now perform the drbd attach
3823 642445d9 Iustin Pop
    info("attaching primary drbds to new secondary (standalone => connected)")
3824 642445d9 Iustin Pop
    failures = []
3825 642445d9 Iustin Pop
    for dev in instance.disks:
3826 642445d9 Iustin Pop
      info("attaching primary drbd for %s to new secondary node" % dev.iv_name)
3827 642445d9 Iustin Pop
      # since the attach is smart, it's enough to 'find' the device,
3828 642445d9 Iustin Pop
      # it will automatically activate the network, if the physical_id
3829 642445d9 Iustin Pop
      # is correct
3830 642445d9 Iustin Pop
      cfg.SetDiskID(dev, pri_node)
3831 642445d9 Iustin Pop
      if not rpc.call_blockdev_find(pri_node, dev):
3832 642445d9 Iustin Pop
        warning("can't attach drbd %s to new secondary!" % dev.iv_name,
3833 642445d9 Iustin Pop
                "please do a gnt-instance info to see the status of disks")
3834 a9e0c397 Iustin Pop
3835 a9e0c397 Iustin Pop
    # this can fail as the old devices are degraded and _WaitForSync
3836 a9e0c397 Iustin Pop
    # does a combined result over all disks, so we don't check its
3837 a9e0c397 Iustin Pop
    # return value
3838 5bfac263 Iustin Pop
    self.proc.LogStep(5, steps_total, "sync devices")
3839 5bfac263 Iustin Pop
    _WaitForSync(cfg, instance, self.proc, unlock=True)
3840 a9e0c397 Iustin Pop
3841 a9e0c397 Iustin Pop
    # so check manually all the devices
3842 a9e0c397 Iustin Pop
    for name, (dev, old_lvs) in iv_names.iteritems():
3843 a9e0c397 Iustin Pop
      cfg.SetDiskID(dev, pri_node)
3844 a9e0c397 Iustin Pop
      is_degr = rpc.call_blockdev_find(pri_node, dev)[5]
3845 a9e0c397 Iustin Pop
      if is_degr:
3846 a9e0c397 Iustin Pop
        raise errors.OpExecError("DRBD device %s is degraded!" % name)
3847 a9e0c397 Iustin Pop
3848 5bfac263 Iustin Pop
    self.proc.LogStep(6, steps_total, "removing old storage")
3849 a9e0c397 Iustin Pop
    for name, (dev, old_lvs) in iv_names.iteritems():
3850 0834c866 Iustin Pop
      info("remove logical volumes for %s" % name)
3851 a9e0c397 Iustin Pop
      for lv in old_lvs:
3852 a9e0c397 Iustin Pop
        cfg.SetDiskID(lv, old_node)
3853 a9e0c397 Iustin Pop
        if not rpc.call_blockdev_remove(old_node, lv):
3854 0834c866 Iustin Pop
          warning("Can't remove LV on old secondary",
3855 79caa9ed Guido Trotter
                  hint="Cleanup stale volumes by hand")
3856 a9e0c397 Iustin Pop
3857 a9e0c397 Iustin Pop
  def Exec(self, feedback_fn):
3858 a9e0c397 Iustin Pop
    """Execute disk replacement.
3859 a9e0c397 Iustin Pop

3860 a9e0c397 Iustin Pop
    This dispatches the disk replacement to the appropriate handler.
3861 a9e0c397 Iustin Pop

3862 a9e0c397 Iustin Pop
    """
3863 a9e0c397 Iustin Pop
    instance = self.instance
3864 22985314 Guido Trotter
3865 22985314 Guido Trotter
    # Activate the instance disks if we're replacing them on a down instance
3866 22985314 Guido Trotter
    if instance.status == "down":
3867 22985314 Guido Trotter
      op = opcodes.OpActivateInstanceDisks(instance_name=instance.name)
3868 22985314 Guido Trotter
      self.proc.ChainOpCode(op)
3869 22985314 Guido Trotter
3870 abdf0113 Iustin Pop
    if instance.disk_template == constants.DT_DRBD8:
3871 a9e0c397 Iustin Pop
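      # without a remote node we only replace the local storage in place;
      # with one, the secondary node is replaced by the given node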
      if self.op.remote_node is None:
3872 a9e0c397 Iustin Pop
        fn = self._ExecD8DiskOnly
3873 a9e0c397 Iustin Pop
      else:
3874 a9e0c397 Iustin Pop
        fn = self._ExecD8Secondary
3875 a9e0c397 Iustin Pop
    else:
3876 a9e0c397 Iustin Pop
      raise errors.ProgrammerError("Unhandled disk replacement case")
3877 22985314 Guido Trotter
3878 22985314 Guido Trotter
    ret = fn(feedback_fn)
3879 22985314 Guido Trotter
3880 22985314 Guido Trotter
    # Deactivate the instance disks if we're replacing them on a down instance
3881 22985314 Guido Trotter
    if instance.status == "down":
3882 22985314 Guido Trotter
      op = opcodes.OpDeactivateInstanceDisks(instance_name=instance.name)
3883 22985314 Guido Trotter
      self.proc.ChainOpCode(op)
3884 22985314 Guido Trotter
3885 22985314 Guido Trotter
    return ret
3886 a9e0c397 Iustin Pop
3887 a8083063 Iustin Pop
3888 8729e0d7 Iustin Pop
class LUGrowDisk(LogicalUnit):
3889 8729e0d7 Iustin Pop
  """Grow a disk of an instance.
3890 8729e0d7 Iustin Pop

3891 8729e0d7 Iustin Pop
  """
3892 8729e0d7 Iustin Pop
  HPATH = "disk-grow"
3893 8729e0d7 Iustin Pop
  HTYPE = constants.HTYPE_INSTANCE
3894 8729e0d7 Iustin Pop
  _OP_REQP = ["instance_name", "disk", "amount"]
3895 8729e0d7 Iustin Pop
3896 8729e0d7 Iustin Pop
  def BuildHooksEnv(self):
3897 8729e0d7 Iustin Pop
    """Build hooks env.
3898 8729e0d7 Iustin Pop

3899 8729e0d7 Iustin Pop
    This runs on the master and the instance's primary node.
3900 8729e0d7 Iustin Pop

3901 8729e0d7 Iustin Pop
    """
3902 8729e0d7 Iustin Pop
    env = {
3903 8729e0d7 Iustin Pop
      "DISK": self.op.disk,
3904 8729e0d7 Iustin Pop
      "AMOUNT": self.op.amount,
3905 8729e0d7 Iustin Pop
      }
3906 8729e0d7 Iustin Pop
    env.update(_BuildInstanceHookEnvByObject(self.instance))
3907 8729e0d7 Iustin Pop
    nl = [
3908 8729e0d7 Iustin Pop
      self.sstore.GetMasterNode(),
3909 8729e0d7 Iustin Pop
      self.instance.primary_node,
3910 8729e0d7 Iustin Pop
      ]
3911 8729e0d7 Iustin Pop
    return env, nl, nl
3912 8729e0d7 Iustin Pop
3913 8729e0d7 Iustin Pop
  def CheckPrereq(self):
3914 8729e0d7 Iustin Pop
    """Check prerequisites.
3915 8729e0d7 Iustin Pop

3916 8729e0d7 Iustin Pop
    This checks that the instance is in the cluster, that its disk layout
    supports growing, that the given disk exists and that there is enough
    free space on the nodes holding it.
3917 8729e0d7 Iustin Pop

3918 8729e0d7 Iustin Pop
    """
3919 8729e0d7 Iustin Pop
    instance = self.cfg.GetInstanceInfo(
3920 8729e0d7 Iustin Pop
      self.cfg.ExpandInstanceName(self.op.instance_name))
3921 8729e0d7 Iustin Pop
    if instance is None:
3922 8729e0d7 Iustin Pop
      raise errors.OpPrereqError("Instance '%s' not known" %
3923 8729e0d7 Iustin Pop
                                 self.op.instance_name)
3924 8729e0d7 Iustin Pop
    self.instance = instance
3925 8729e0d7 Iustin Pop
    self.op.instance_name = instance.name
3926 8729e0d7 Iustin Pop
3927 8729e0d7 Iustin Pop
    if instance.disk_template not in (constants.DT_PLAIN, constants.DT_DRBD8):
3928 8729e0d7 Iustin Pop
      raise errors.OpPrereqError("Instance's disk layout does not support"
3929 8729e0d7 Iustin Pop
                                 " growing.")
3930 8729e0d7 Iustin Pop
3931 8729e0d7 Iustin Pop
    if instance.FindDisk(self.op.disk) is None:
3932 8729e0d7 Iustin Pop
      raise errors.OpPrereqError("Disk '%s' not found for instance '%s'" %
3933 c7cdfc90 Iustin Pop
                                 (self.op.disk, instance.name))
3934 8729e0d7 Iustin Pop
3935 8729e0d7 Iustin Pop
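    # check that there is enough free space in the volume group on every
    # node that holds a disk of this instance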
    nodenames = [instance.primary_node] + list(instance.secondary_nodes)
3936 8729e0d7 Iustin Pop
    nodeinfo = rpc.call_node_info(nodenames, self.cfg.GetVGName())
3937 8729e0d7 Iustin Pop
    for node in nodenames:
3938 8729e0d7 Iustin Pop
      info = nodeinfo.get(node, None)
3939 8729e0d7 Iustin Pop
      if not info:
3940 8729e0d7 Iustin Pop
        raise errors.OpPrereqError("Cannot get current information"
3941 8729e0d7 Iustin Pop
                                   " from node '%s'" % node)
3942 8729e0d7 Iustin Pop
      vg_free = info.get('vg_free', None)
3943 8729e0d7 Iustin Pop
      if not isinstance(vg_free, int):
3944 8729e0d7 Iustin Pop
        raise errors.OpPrereqError("Can't compute free disk space on"
3945 8729e0d7 Iustin Pop
                                   " node %s" % node)
3946 8729e0d7 Iustin Pop
      if self.op.amount > info['vg_free']:
3947 8729e0d7 Iustin Pop
        raise errors.OpPrereqError("Not enough disk space on target node %s:"
3948 8729e0d7 Iustin Pop
                                   " %d MiB available, %d MiB required" %
3949 8729e0d7 Iustin Pop
                                   (node, info['vg_free'], self.op.amount))
3950 8729e0d7 Iustin Pop
3951 8729e0d7 Iustin Pop
  def Exec(self, feedback_fn):
3952 8729e0d7 Iustin Pop
    """Execute disk grow.
3953 8729e0d7 Iustin Pop

3954 8729e0d7 Iustin Pop
    """
3955 8729e0d7 Iustin Pop
    instance = self.instance
3956 8729e0d7 Iustin Pop
    disk = instance.FindDisk(self.op.disk)
3957 8729e0d7 Iustin Pop
    for node in (instance.secondary_nodes + (instance.primary_node,)):
3958 8729e0d7 Iustin Pop
      self.cfg.SetDiskID(disk, node)
3959 8729e0d7 Iustin Pop
      result = rpc.call_blockdev_grow(node, disk, self.op.amount)
3960 8729e0d7 Iustin Pop
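      # the grow rpc is expected to return a (status, message) pair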
      if not result or not isinstance(result, tuple) or len(result) != 2:
3961 8729e0d7 Iustin Pop
        raise errors.OpExecError("grow request failed to node %s" % node)
3962 8729e0d7 Iustin Pop
      elif not result[0]:
3963 8729e0d7 Iustin Pop
        raise errors.OpExecError("grow request failed to node %s: %s" %
3964 8729e0d7 Iustin Pop
                                 (node, result[1]))
3965 8729e0d7 Iustin Pop
    disk.RecordGrow(self.op.amount)
3966 8729e0d7 Iustin Pop
    self.cfg.Update(instance)
3967 8729e0d7 Iustin Pop
    return
3968 8729e0d7 Iustin Pop
3969 8729e0d7 Iustin Pop
3970 a8083063 Iustin Pop
class LUQueryInstanceData(NoHooksLU):
3971 a8083063 Iustin Pop
  """Query runtime instance data.
3972 a8083063 Iustin Pop

3973 a8083063 Iustin Pop
  """
3974 a8083063 Iustin Pop
  _OP_REQP = ["instances"]
3975 a8083063 Iustin Pop
3976 a8083063 Iustin Pop
  def CheckPrereq(self):
3977 a8083063 Iustin Pop
    """Check prerequisites.
3978 a8083063 Iustin Pop

3979 a8083063 Iustin Pop
    This only checks the optional instance list against the existing names.
3980 a8083063 Iustin Pop

3981 a8083063 Iustin Pop
    """
3982 a8083063 Iustin Pop
    if not isinstance(self.op.instances, list):
3983 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("Invalid argument type 'instances'")
3984 a8083063 Iustin Pop
    if self.op.instances:
3985 a8083063 Iustin Pop
      self.wanted_instances = []
3986 a8083063 Iustin Pop
      names = self.op.instances
3987 a8083063 Iustin Pop
      for name in names:
3988 a8083063 Iustin Pop
        instance = self.cfg.GetInstanceInfo(self.cfg.ExpandInstanceName(name))
3989 a8083063 Iustin Pop
        if instance is None:
3990 3ecf6786 Iustin Pop
          raise errors.OpPrereqError("No such instance name '%s'" % name)
3991 515207af Guido Trotter
        self.wanted_instances.append(instance)
3992 a8083063 Iustin Pop
    else:
3993 a8083063 Iustin Pop
      self.wanted_instances = [self.cfg.GetInstanceInfo(name) for name
3994 a8083063 Iustin Pop
                               in self.cfg.GetInstanceList()]
3995 a8083063 Iustin Pop
    return
3996 a8083063 Iustin Pop
3997 a8083063 Iustin Pop
3998 a8083063 Iustin Pop
  def _ComputeDiskStatus(self, instance, snode, dev):
3999 a8083063 Iustin Pop
    """Compute block device status.
4000 a8083063 Iustin Pop

4001 a8083063 Iustin Pop
    """
4002 a8083063 Iustin Pop
    self.cfg.SetDiskID(dev, instance.primary_node)
4003 a8083063 Iustin Pop
    dev_pstatus = rpc.call_blockdev_find(instance.primary_node, dev)
4004 a1f445d3 Iustin Pop
    if dev.dev_type in constants.LDS_DRBD:
4005 a8083063 Iustin Pop
      # we change the snode then (otherwise we use the one passed in)
4006 a8083063 Iustin Pop
      if dev.logical_id[0] == instance.primary_node:
4007 a8083063 Iustin Pop
        snode = dev.logical_id[1]
4008 a8083063 Iustin Pop
      else:
4009 a8083063 Iustin Pop
        snode = dev.logical_id[0]
4010 a8083063 Iustin Pop
4011 a8083063 Iustin Pop
    if snode:
4012 a8083063 Iustin Pop
      self.cfg.SetDiskID(dev, snode)
4013 a8083063 Iustin Pop
      dev_sstatus = rpc.call_blockdev_find(snode, dev)
4014 a8083063 Iustin Pop
    else:
4015 a8083063 Iustin Pop
      dev_sstatus = None
4016 a8083063 Iustin Pop
4017 a8083063 Iustin Pop
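    # recurse into the children so that the whole device tree is reported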
    if dev.children:
4018 a8083063 Iustin Pop
      dev_children = [self._ComputeDiskStatus(instance, snode, child)
4019 a8083063 Iustin Pop
                      for child in dev.children]
4020 a8083063 Iustin Pop
    else:
4021 a8083063 Iustin Pop
      dev_children = []
4022 a8083063 Iustin Pop
4023 a8083063 Iustin Pop
    data = {
4024 a8083063 Iustin Pop
      "iv_name": dev.iv_name,
4025 a8083063 Iustin Pop
      "dev_type": dev.dev_type,
4026 a8083063 Iustin Pop
      "logical_id": dev.logical_id,
4027 a8083063 Iustin Pop
      "physical_id": dev.physical_id,
4028 a8083063 Iustin Pop
      "pstatus": dev_pstatus,
4029 a8083063 Iustin Pop
      "sstatus": dev_sstatus,
4030 a8083063 Iustin Pop
      "children": dev_children,
4031 a8083063 Iustin Pop
      }
4032 a8083063 Iustin Pop
4033 a8083063 Iustin Pop
    return data
4034 a8083063 Iustin Pop
4035 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
4036 a8083063 Iustin Pop
    """Gather and return data"""
4037 a8083063 Iustin Pop
    result = {}
4038 a8083063 Iustin Pop
    for instance in self.wanted_instances:
4039 a8083063 Iustin Pop
      remote_info = rpc.call_instance_info(instance.primary_node,
4040 a8083063 Iustin Pop
                                                instance.name)
4041 a8083063 Iustin Pop
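      # instance_info returns runtime data only if the instance is actually
      # running on the hypervisor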
      if remote_info and "state" in remote_info:
4042 a8083063 Iustin Pop
        remote_state = "up"
4043 a8083063 Iustin Pop
      else:
4044 a8083063 Iustin Pop
        remote_state = "down"
4045 a8083063 Iustin Pop
      if instance.status == "down":
4046 a8083063 Iustin Pop
        config_state = "down"
4047 a8083063 Iustin Pop
      else:
4048 a8083063 Iustin Pop
        config_state = "up"
4049 a8083063 Iustin Pop
4050 a8083063 Iustin Pop
      disks = [self._ComputeDiskStatus(instance, None, device)
4051 a8083063 Iustin Pop
               for device in instance.disks]
4052 a8083063 Iustin Pop
4053 a8083063 Iustin Pop
      idict = {
4054 a8083063 Iustin Pop
        "name": instance.name,
4055 a8083063 Iustin Pop
        "config_state": config_state,
4056 a8083063 Iustin Pop
        "run_state": remote_state,
4057 a8083063 Iustin Pop
        "pnode": instance.primary_node,
4058 a8083063 Iustin Pop
        "snodes": instance.secondary_nodes,
4059 a8083063 Iustin Pop
        "os": instance.os,
4060 a8083063 Iustin Pop
        "memory": instance.memory,
4061 a8083063 Iustin Pop
        "nics": [(nic.mac, nic.ip, nic.bridge) for nic in instance.nics],
4062 a8083063 Iustin Pop
        "disks": disks,
4063 f55ff7ec Iustin Pop
        "vcpus": instance.vcpus,
4064 a8083063 Iustin Pop
        }
4065 a8083063 Iustin Pop
4066 a8340917 Iustin Pop
      htkind = self.sstore.GetHypervisorType()
4067 a8340917 Iustin Pop
      if htkind == constants.HT_XEN_PVM30:
4068 a8340917 Iustin Pop
        idict["kernel_path"] = instance.kernel_path
4069 a8340917 Iustin Pop
        idict["initrd_path"] = instance.initrd_path
4070 a8340917 Iustin Pop
4071 a8340917 Iustin Pop
      if htkind == constants.HT_XEN_HVM31:
4072 a8340917 Iustin Pop
        idict["hvm_boot_order"] = instance.hvm_boot_order
4073 a8340917 Iustin Pop
        idict["hvm_acpi"] = instance.hvm_acpi
4074 a8340917 Iustin Pop
        idict["hvm_pae"] = instance.hvm_pae
4075 a8340917 Iustin Pop
        idict["hvm_cdrom_image_path"] = instance.hvm_cdrom_image_path
4076 a8340917 Iustin Pop
4077 a8340917 Iustin Pop
      if htkind in constants.HTS_REQ_PORT:
4078 a8340917 Iustin Pop
        idict["vnc_bind_address"] = instance.vnc_bind_address
4079 a8340917 Iustin Pop
        idict["network_port"] = instance.network_port
4080 a8340917 Iustin Pop
4081 a8083063 Iustin Pop
      result[instance.name] = idict
4082 a8083063 Iustin Pop
4083 a8083063 Iustin Pop
    return result
4084 a8083063 Iustin Pop
4085 a8083063 Iustin Pop
4086 7767bbf5 Manuel Franceschini
class LUSetInstanceParams(LogicalUnit):
4087 a8083063 Iustin Pop
  """Modifies an instances's parameters.
4088 a8083063 Iustin Pop

4089 a8083063 Iustin Pop
  """
4090 a8083063 Iustin Pop
  HPATH = "instance-modify"
4091 a8083063 Iustin Pop
  HTYPE = constants.HTYPE_INSTANCE
4092 a8083063 Iustin Pop
  _OP_REQP = ["instance_name"]
4093 1a5c7281 Guido Trotter
  REQ_BGL = False
4094 1a5c7281 Guido Trotter
4095 1a5c7281 Guido Trotter
  def ExpandNames(self):
4096 1a5c7281 Guido Trotter
    self._ExpandAndLockInstance()
4097 a8083063 Iustin Pop
4098 a8083063 Iustin Pop
  def BuildHooksEnv(self):
4099 a8083063 Iustin Pop
    """Build hooks env.
4100 a8083063 Iustin Pop

4101 a8083063 Iustin Pop
    This runs on the master, primary and secondaries.
4102 a8083063 Iustin Pop

4103 a8083063 Iustin Pop
    """
4104 396e1b78 Michael Hanselmann
    args = dict()
4105 a8083063 Iustin Pop
    if self.mem:
4106 396e1b78 Michael Hanselmann
      args['memory'] = self.mem
4107 a8083063 Iustin Pop
    if self.vcpus:
4108 396e1b78 Michael Hanselmann
      args['vcpus'] = self.vcpus
4109 ef756965 Iustin Pop
    if self.do_ip or self.do_bridge or self.mac:
4110 396e1b78 Michael Hanselmann
      if self.do_ip:
4111 396e1b78 Michael Hanselmann
        ip = self.ip
4112 396e1b78 Michael Hanselmann
      else:
4113 396e1b78 Michael Hanselmann
        ip = self.instance.nics[0].ip
4114 396e1b78 Michael Hanselmann
      if self.bridge:
4115 396e1b78 Michael Hanselmann
        bridge = self.bridge
4116 396e1b78 Michael Hanselmann
      else:
4117 396e1b78 Michael Hanselmann
        bridge = self.instance.nics[0].bridge
4118 ef756965 Iustin Pop
      if self.mac:
4119 ef756965 Iustin Pop
        mac = self.mac
4120 ef756965 Iustin Pop
      else:
4121 ef756965 Iustin Pop
        mac = self.instance.nics[0].mac
4122 ef756965 Iustin Pop
      args['nics'] = [(ip, bridge, mac)]
4123 396e1b78 Michael Hanselmann
    env = _BuildInstanceHookEnvByObject(self.instance, override=args)
4124 880478f8 Iustin Pop
    nl = [self.sstore.GetMasterNode(),
4125 a8083063 Iustin Pop
          self.instance.primary_node] + list(self.instance.secondary_nodes)
4126 a8083063 Iustin Pop
    return env, nl, nl
4127 a8083063 Iustin Pop
4128 a8083063 Iustin Pop
  def CheckPrereq(self):
4129 a8083063 Iustin Pop
    """Check prerequisites.
4130 a8083063 Iustin Pop

4131 a8083063 Iustin Pop
    This only checks the validity of the parameters passed in.
4132 a8083063 Iustin Pop

4133 a8083063 Iustin Pop
    """
4134 1a5c7281 Guido Trotter
    # FIXME: all the parameters could be checked before, in ExpandNames, or in
4135 1a5c7281 Guido Trotter
    # a separate CheckArguments function, if we implement one, so the operation
4136 1a5c7281 Guido Trotter
    # can be aborted without waiting for any lock, should it have an error...
4137 a8083063 Iustin Pop
    self.mem = getattr(self.op, "mem", None)
4138 a8083063 Iustin Pop
    self.vcpus = getattr(self.op, "vcpus", None)
4139 a8083063 Iustin Pop
    self.ip = getattr(self.op, "ip", None)
4140 1862d460 Alexander Schreiber
    self.mac = getattr(self.op, "mac", None)
4141 a8083063 Iustin Pop
    self.bridge = getattr(self.op, "bridge", None)
4142 973d7867 Iustin Pop
    self.kernel_path = getattr(self.op, "kernel_path", None)
4143 973d7867 Iustin Pop
    self.initrd_path = getattr(self.op, "initrd_path", None)
4144 25c5878d Alexander Schreiber
    self.hvm_boot_order = getattr(self.op, "hvm_boot_order", None)
4145 31a853d2 Iustin Pop
    self.hvm_acpi = getattr(self.op, "hvm_acpi", None)
4146 31a853d2 Iustin Pop
    self.hvm_pae = getattr(self.op, "hvm_pae", None)
4147 31a853d2 Iustin Pop
    self.hvm_cdrom_image_path = getattr(self.op, "hvm_cdrom_image_path", None)
4148 31a853d2 Iustin Pop
    self.vnc_bind_address = getattr(self.op, "vnc_bind_address", None)
4149 31a853d2 Iustin Pop
    all_parms = [self.mem, self.vcpus, self.ip, self.bridge, self.mac,
4150 31a853d2 Iustin Pop
                 self.kernel_path, self.initrd_path, self.hvm_boot_order,
4151 31a853d2 Iustin Pop
                 self.hvm_acpi, self.hvm_pae, self.hvm_cdrom_image_path,
4152 31a853d2 Iustin Pop
                 self.vnc_bind_address]
4153 31a853d2 Iustin Pop
    if all_parms.count(None) == len(all_parms):
4154 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("No changes submitted")
4155 a8083063 Iustin Pop
    if self.mem is not None:
4156 a8083063 Iustin Pop
      try:
4157 a8083063 Iustin Pop
        self.mem = int(self.mem)
4158 a8083063 Iustin Pop
      except ValueError, err:
4159 3ecf6786 Iustin Pop
        raise errors.OpPrereqError("Invalid memory size: %s" % str(err))
4160 a8083063 Iustin Pop
    if self.vcpus is not None:
4161 a8083063 Iustin Pop
      try:
4162 a8083063 Iustin Pop
        self.vcpus = int(self.vcpus)
4163 a8083063 Iustin Pop
      except ValueError, err:
4164 3ecf6786 Iustin Pop
        raise errors.OpPrereqError("Invalid vcpus number: %s" % str(err))
4165 a8083063 Iustin Pop
    if self.ip is not None:
4166 a8083063 Iustin Pop
      self.do_ip = True
4167 a8083063 Iustin Pop
      if self.ip.lower() == "none":
4168 a8083063 Iustin Pop
        self.ip = None
4169 a8083063 Iustin Pop
      else:
4170 a8083063 Iustin Pop
        if not utils.IsValidIP(self.ip):
4171 3ecf6786 Iustin Pop
          raise errors.OpPrereqError("Invalid IP address '%s'." % self.ip)
4172 a8083063 Iustin Pop
    else:
4173 a8083063 Iustin Pop
      self.do_ip = False
4174 ecb215b5 Michael Hanselmann
    self.do_bridge = (self.bridge is not None)
4175 1862d460 Alexander Schreiber
    if self.mac is not None:
4176 1862d460 Alexander Schreiber
      if self.cfg.IsMacInUse(self.mac):
4177 1862d460 Alexander Schreiber
        raise errors.OpPrereqError('MAC address %s already in use in cluster' %
4178 1862d460 Alexander Schreiber
                                   self.mac)
4179 1862d460 Alexander Schreiber
      if not utils.IsValidMac(self.mac):
4180 1862d460 Alexander Schreiber
        raise errors.OpPrereqError('Invalid MAC address %s' % self.mac)
4181 a8083063 Iustin Pop
4182 973d7867 Iustin Pop
    if self.kernel_path is not None:
4183 973d7867 Iustin Pop
      self.do_kernel_path = True
4184 973d7867 Iustin Pop
      if self.kernel_path == constants.VALUE_NONE:
4185 973d7867 Iustin Pop
        raise errors.OpPrereqError("Can't set instance to no kernel")
4186 973d7867 Iustin Pop
4187 973d7867 Iustin Pop
      if self.kernel_path != constants.VALUE_DEFAULT:
4188 973d7867 Iustin Pop
        if not os.path.isabs(self.kernel_path):
4189 ba4b62cf Iustin Pop
          raise errors.OpPrereqError("The kernel path must be an absolute"
4190 973d7867 Iustin Pop
                                    " filename")
4191 8cafeb26 Iustin Pop
    else:
4192 8cafeb26 Iustin Pop
      self.do_kernel_path = False
4193 973d7867 Iustin Pop
4194 973d7867 Iustin Pop
    if self.initrd_path is not None:
4195 973d7867 Iustin Pop
      self.do_initrd_path = True
4196 973d7867 Iustin Pop
      if self.initrd_path not in (constants.VALUE_NONE,
4197 973d7867 Iustin Pop
                                  constants.VALUE_DEFAULT):
4198 2bc22872 Iustin Pop
        if not os.path.isabs(self.initrd_path):
4199 ba4b62cf Iustin Pop
          raise errors.OpPrereqError("The initrd path must be an absolute"
4200 973d7867 Iustin Pop
                                    " filename")
4201 8cafeb26 Iustin Pop
    else:
4202 8cafeb26 Iustin Pop
      self.do_initrd_path = False
4203 973d7867 Iustin Pop
4204 25c5878d Alexander Schreiber
    # boot order verification
4205 25c5878d Alexander Schreiber
    if self.hvm_boot_order is not None:
4206 25c5878d Alexander Schreiber
      if self.hvm_boot_order != constants.VALUE_DEFAULT:
4207 25c5878d Alexander Schreiber
        if len(self.hvm_boot_order.strip("acdn")) != 0:
4208 25c5878d Alexander Schreiber
          raise errors.OpPrereqError("invalid boot order specified,"
4209 25c5878d Alexander Schreiber
                                     " must be one or more of [acdn]"
4210 25c5878d Alexander Schreiber
                                     " or 'default'")
4211 25c5878d Alexander Schreiber
4212 31a853d2 Iustin Pop
    # hvm_cdrom_image_path verification
4213 31a853d2 Iustin Pop
    if self.op.hvm_cdrom_image_path is not None:
4214 31a853d2 Iustin Pop
      if not os.path.isabs(self.op.hvm_cdrom_image_path):
4215 31a853d2 Iustin Pop
        raise errors.OpPrereqError("The path to the HVM CDROM image must"
4216 31a853d2 Iustin Pop
                                   " be an absolute path or None, not %s" %
4217 31a853d2 Iustin Pop
                                   self.op.hvm_cdrom_image_path)
4218 31a853d2 Iustin Pop
      if not os.path.isfile(self.op.hvm_cdrom_image_path):
4219 31a853d2 Iustin Pop
        raise errors.OpPrereqError("The HVM CDROM image must either be a"
4220 31a853d2 Iustin Pop
                                   " regular file or a symlink pointing to"
4221 31a853d2 Iustin Pop
                                   " an existing regular file, not %s" %
4222 31a853d2 Iustin Pop
                                   self.op.hvm_cdrom_image_path)
4223 31a853d2 Iustin Pop
4224 31a853d2 Iustin Pop
    # vnc_bind_address verification
4225 31a853d2 Iustin Pop
    if self.op.vnc_bind_address is not None:
4226 31a853d2 Iustin Pop
      if not utils.IsValidIP(self.op.vnc_bind_address):
4227 31a853d2 Iustin Pop
        raise errors.OpPrereqError("given VNC bind address '%s' doesn't look"
4228 31a853d2 Iustin Pop
                                   " like a valid IP address" %
4229 31a853d2 Iustin Pop
                                   self.op.vnc_bind_address)
4230 31a853d2 Iustin Pop
4231 1a5c7281 Guido Trotter
    self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
4232 1a5c7281 Guido Trotter
    assert self.instance is not None, \
4233 1a5c7281 Guido Trotter
      "Cannot retrieve locked instance %s" % self.op.instance_name
4234 a8083063 Iustin Pop
    return
4235 a8083063 Iustin Pop
4236 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
4237 a8083063 Iustin Pop
    """Modifies an instance.
4238 a8083063 Iustin Pop

4239 a8083063 Iustin Pop
    All parameters take effect only at the next restart of the instance.
4240 a8083063 Iustin Pop
    """
4241 a8083063 Iustin Pop
    result = []
4242 a8083063 Iustin Pop
    instance = self.instance
4243 a8083063 Iustin Pop
    if self.mem:
4244 a8083063 Iustin Pop
      instance.memory = self.mem
4245 a8083063 Iustin Pop
      result.append(("mem", self.mem))
4246 a8083063 Iustin Pop
    if self.vcpus:
4247 a8083063 Iustin Pop
      instance.vcpus = self.vcpus
4248 a8083063 Iustin Pop
      result.append(("vcpus",  self.vcpus))
4249 a8083063 Iustin Pop
    if self.do_ip:
4250 a8083063 Iustin Pop
      instance.nics[0].ip = self.ip
4251 a8083063 Iustin Pop
      result.append(("ip", self.ip))
4252 a8083063 Iustin Pop
    if self.bridge:
4253 a8083063 Iustin Pop
      instance.nics[0].bridge = self.bridge
4254 a8083063 Iustin Pop
      result.append(("bridge", self.bridge))
4255 1862d460 Alexander Schreiber
    if self.mac:
4256 1862d460 Alexander Schreiber
      instance.nics[0].mac = self.mac
4257 1862d460 Alexander Schreiber
      result.append(("mac", self.mac))
4258 973d7867 Iustin Pop
    if self.do_kernel_path:
4259 973d7867 Iustin Pop
      instance.kernel_path = self.kernel_path
4260 973d7867 Iustin Pop
      result.append(("kernel_path", self.kernel_path))
4261 973d7867 Iustin Pop
    if self.do_initrd_path:
4262 973d7867 Iustin Pop
      instance.initrd_path = self.initrd_path
4263 973d7867 Iustin Pop
      result.append(("initrd_path", self.initrd_path))
4264 25c5878d Alexander Schreiber
    if self.hvm_boot_order:
4265 25c5878d Alexander Schreiber
      if self.hvm_boot_order == constants.VALUE_DEFAULT:
4266 25c5878d Alexander Schreiber
        instance.hvm_boot_order = None
4267 25c5878d Alexander Schreiber
      else:
4268 25c5878d Alexander Schreiber
        instance.hvm_boot_order = self.hvm_boot_order
4269 25c5878d Alexander Schreiber
      result.append(("hvm_boot_order", self.hvm_boot_order))
4270 31a853d2 Iustin Pop
    if self.hvm_acpi:
4271 ec1ba002 Iustin Pop
      instance.hvm_acpi = self.hvm_acpi
4272 31a853d2 Iustin Pop
      result.append(("hvm_acpi", self.hvm_acpi))
4273 31a853d2 Iustin Pop
    if self.hvm_pae:
4274 ec1ba002 Iustin Pop
      instance.hvm_pae = self.hvm_pae
4275 31a853d2 Iustin Pop
      result.append(("hvm_pae", self.hvm_pae))
4276 31a853d2 Iustin Pop
    if self.hvm_cdrom_image_path:
4277 ec1ba002 Iustin Pop
      instance.hvm_cdrom_image_path = self.hvm_cdrom_image_path
4278 31a853d2 Iustin Pop
      result.append(("hvm_cdrom_image_path", self.hvm_cdrom_image_path))
4279 31a853d2 Iustin Pop
    if self.vnc_bind_address:
4280 31a853d2 Iustin Pop
      instance.vnc_bind_address = self.vnc_bind_address
4281 31a853d2 Iustin Pop
      result.append(("vnc_bind_address", self.vnc_bind_address))
4282 a8083063 Iustin Pop
4283 ea94e1cd Guido Trotter
    self.cfg.Update(instance)
4284 a8083063 Iustin Pop
4285 a8083063 Iustin Pop
    return result
4286 a8083063 Iustin Pop
4287 a8083063 Iustin Pop
4288 a8083063 Iustin Pop
class LUQueryExports(NoHooksLU):
4289 a8083063 Iustin Pop
  """Query the exports list
4290 a8083063 Iustin Pop

4291 a8083063 Iustin Pop
  """
4292 a8083063 Iustin Pop
  _OP_REQP = []
4293 a8083063 Iustin Pop
4294 a8083063 Iustin Pop
  def CheckPrereq(self):
4295 a8083063 Iustin Pop
    """Check that the nodelist contains only existing nodes.
4296 a8083063 Iustin Pop

4297 a8083063 Iustin Pop
    """
4298 dcb93971 Michael Hanselmann
    self.nodes = _GetWantedNodes(self, getattr(self.op, "nodes", None))
4299 a8083063 Iustin Pop
4300 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
4301 a8083063 Iustin Pop
    """Compute the list of all the exported system images.
4302 a8083063 Iustin Pop

4303 a8083063 Iustin Pop
    Returns:
4304 a8083063 Iustin Pop
      a dictionary with the structure node->(export-list)
4305 a8083063 Iustin Pop
      where export-list is a list of the instances exported on
4306 a8083063 Iustin Pop
      that node.
4307 a8083063 Iustin Pop

4308 a8083063 Iustin Pop
    """
4309 a7ba5e53 Iustin Pop
    return rpc.call_export_list(self.nodes)
4310 a8083063 Iustin Pop
4311 a8083063 Iustin Pop
4312 a8083063 Iustin Pop
class LUExportInstance(LogicalUnit):
4313 a8083063 Iustin Pop
  """Export an instance to an image in the cluster.
4314 a8083063 Iustin Pop

4315 a8083063 Iustin Pop
  """
4316 a8083063 Iustin Pop
  HPATH = "instance-export"
4317 a8083063 Iustin Pop
  HTYPE = constants.HTYPE_INSTANCE
4318 a8083063 Iustin Pop
  _OP_REQP = ["instance_name", "target_node", "shutdown"]
4319 a8083063 Iustin Pop
4320 a8083063 Iustin Pop
  def BuildHooksEnv(self):
4321 a8083063 Iustin Pop
    """Build hooks env.
4322 a8083063 Iustin Pop

4323 a8083063 Iustin Pop
    This will run on the master, primary node and target node.
4324 a8083063 Iustin Pop

4325 a8083063 Iustin Pop
    """
4326 a8083063 Iustin Pop
    env = {
4327 a8083063 Iustin Pop
      "EXPORT_NODE": self.op.target_node,
4328 a8083063 Iustin Pop
      "EXPORT_DO_SHUTDOWN": self.op.shutdown,
4329 a8083063 Iustin Pop
      }
4330 396e1b78 Michael Hanselmann
    env.update(_BuildInstanceHookEnvByObject(self.instance))
4331 880478f8 Iustin Pop
    nl = [self.sstore.GetMasterNode(), self.instance.primary_node,
4332 a8083063 Iustin Pop
          self.op.target_node]
4333 a8083063 Iustin Pop
    return env, nl, nl
4334 a8083063 Iustin Pop
4335 a8083063 Iustin Pop
  def CheckPrereq(self):
4336 a8083063 Iustin Pop
    """Check prerequisites.
4337 a8083063 Iustin Pop

4338 9ac99fda Guido Trotter
    This checks that the instance and node names are valid.
4339 a8083063 Iustin Pop

4340 a8083063 Iustin Pop
    """
4341 a8083063 Iustin Pop
    instance_name = self.cfg.ExpandInstanceName(self.op.instance_name)
4342 a8083063 Iustin Pop
    self.instance = self.cfg.GetInstanceInfo(instance_name)
4343 a8083063 Iustin Pop
    if self.instance is None:
4344 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("Instance '%s' not found" %
4345 3ecf6786 Iustin Pop
                                 self.op.instance_name)
4346 a8083063 Iustin Pop
4347 a8083063 Iustin Pop
    # node verification
4348 a8083063 Iustin Pop
    dst_node_short = self.cfg.ExpandNodeName(self.op.target_node)
4349 a8083063 Iustin Pop
    self.dst_node = self.cfg.GetNodeInfo(dst_node_short)
4350 a8083063 Iustin Pop
4351 a8083063 Iustin Pop
    if self.dst_node is None:
4352 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("Destination node '%s' is unknown." %
4353 3ecf6786 Iustin Pop
                                 self.op.target_node)
4354 a8083063 Iustin Pop
    self.op.target_node = self.dst_node.name
4355 a8083063 Iustin Pop
4356 b6023d6c Manuel Franceschini
    # instance disk type verification
4357 b6023d6c Manuel Franceschini
    for disk in self.instance.disks:
4358 b6023d6c Manuel Franceschini
      if disk.dev_type == constants.LD_FILE:
4359 b6023d6c Manuel Franceschini
        raise errors.OpPrereqError("Export not supported for instances with"
4360 b6023d6c Manuel Franceschini
                                   " file-based disks")
4361 b6023d6c Manuel Franceschini
4362 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
4363 a8083063 Iustin Pop
    """Export an instance to an image in the cluster.
4364 a8083063 Iustin Pop

4365 a8083063 Iustin Pop
    """
4366 a8083063 Iustin Pop
    instance = self.instance
4367 a8083063 Iustin Pop
    dst_node = self.dst_node
4368 a8083063 Iustin Pop
    src_node = instance.primary_node
4369 a8083063 Iustin Pop
    if self.op.shutdown:
4370 fb300fb7 Guido Trotter
      # shutdown the instance, but not the disks
4371 fb300fb7 Guido Trotter
      if not rpc.call_instance_shutdown(src_node, instance):
4372 fb300fb7 Guido Trotter
         raise errors.OpExecError("Could not shutdown instance %s on node %s" %
4373 b4de68a9 Iustin Pop
                                  (instance.name, src_node))
4374 a8083063 Iustin Pop
4375 a8083063 Iustin Pop
    vgname = self.cfg.GetVGName()
4376 a8083063 Iustin Pop
4377 a8083063 Iustin Pop
    snap_disks = []
4378 a8083063 Iustin Pop
4379 a8083063 Iustin Pop
    try:
4380 a8083063 Iustin Pop
      for disk in instance.disks:
4381 a8083063 Iustin Pop
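        # only the first disk (iv_name "sda") is snapshotted and exported here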
        if disk.iv_name == "sda":
4382 a8083063 Iustin Pop
          # new_dev_name will be a snapshot of an lvm leaf of the one we passed
4383 a8083063 Iustin Pop
          new_dev_name = rpc.call_blockdev_snapshot(src_node, disk)
4384 a8083063 Iustin Pop
4385 a8083063 Iustin Pop
          if not new_dev_name:
4386 a8083063 Iustin Pop
            logger.Error("could not snapshot block device %s on node %s" %
4387 a8083063 Iustin Pop
                         (disk.logical_id[1], src_node))
4388 a8083063 Iustin Pop
          else:
4389 fe96220b Iustin Pop
            new_dev = objects.Disk(dev_type=constants.LD_LV, size=disk.size,
4390 a8083063 Iustin Pop
                                      logical_id=(vgname, new_dev_name),
4391 a8083063 Iustin Pop
                                      physical_id=(vgname, new_dev_name),
4392 a8083063 Iustin Pop
                                      iv_name=disk.iv_name)
4393 a8083063 Iustin Pop
            snap_disks.append(new_dev)
4394 a8083063 Iustin Pop
4395 a8083063 Iustin Pop
    finally:
4396 fb300fb7 Guido Trotter
      if self.op.shutdown and instance.status == "up":
4397 fb300fb7 Guido Trotter
        if not rpc.call_instance_start(src_node, instance, None):
4398 fb300fb7 Guido Trotter
          _ShutdownInstanceDisks(instance, self.cfg)
4399 fb300fb7 Guido Trotter
          raise errors.OpExecError("Could not start instance")
4400 a8083063 Iustin Pop
4401 a8083063 Iustin Pop
    # TODO: check for size
4402 a8083063 Iustin Pop
4403 a8083063 Iustin Pop
    for dev in snap_disks:
4404 16687b98 Manuel Franceschini
      if not rpc.call_snapshot_export(src_node, dev, dst_node.name, instance):
4405 16687b98 Manuel Franceschini
        logger.Error("could not export block device %s from node %s to node %s"
4406 16687b98 Manuel Franceschini
                     % (dev.logical_id[1], src_node, dst_node.name))
4407 a8083063 Iustin Pop
      if not rpc.call_blockdev_remove(src_node, dev):
4408 16687b98 Manuel Franceschini
        logger.Error("could not remove snapshot block device %s from node %s" %
4409 16687b98 Manuel Franceschini
                     (dev.logical_id[1], src_node))
4410 a8083063 Iustin Pop
4411 a8083063 Iustin Pop
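    # finalize the export on the destination node so that it shows up in
    # later export queries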
    if not rpc.call_finalize_export(dst_node.name, instance, snap_disks):
4412 a8083063 Iustin Pop
      logger.Error("could not finalize export for instance %s on node %s" %
4413 a8083063 Iustin Pop
                   (instance.name, dst_node.name))
4414 a8083063 Iustin Pop
4415 a8083063 Iustin Pop
    nodelist = self.cfg.GetNodeList()
4416 a8083063 Iustin Pop
    nodelist.remove(dst_node.name)
4417 a8083063 Iustin Pop
4418 a8083063 Iustin Pop
    # on one-node clusters nodelist will be empty after the removal
4419 a8083063 Iustin Pop
    # if we proceed the backup would be removed because OpQueryExports
4420 a8083063 Iustin Pop
    # substitutes an empty list with the full cluster node list.
4421 a8083063 Iustin Pop
    if nodelist:
4422 a8083063 Iustin Pop
      op = opcodes.OpQueryExports(nodes=nodelist)
4423 5bfac263 Iustin Pop
      exportlist = self.proc.ChainOpCode(op)
4424 a8083063 Iustin Pop
      for node in exportlist:
4425 a8083063 Iustin Pop
        if instance.name in exportlist[node]:
4426 a8083063 Iustin Pop
          if not rpc.call_export_remove(node, instance.name):
4427 a8083063 Iustin Pop
            logger.Error("could not remove older export for instance %s"
4428 a8083063 Iustin Pop
                         " on node %s" % (instance.name, node))
4429 5c947f38 Iustin Pop
4430 5c947f38 Iustin Pop
4431 9ac99fda Guido Trotter
class LURemoveExport(NoHooksLU):
4432 9ac99fda Guido Trotter
  """Remove exports related to the named instance.
4433 9ac99fda Guido Trotter

4434 9ac99fda Guido Trotter
  """
4435 9ac99fda Guido Trotter
  _OP_REQP = ["instance_name"]
4436 9ac99fda Guido Trotter
4437 9ac99fda Guido Trotter
  def CheckPrereq(self):
4438 9ac99fda Guido Trotter
    """Check prerequisites.
4439 9ac99fda Guido Trotter
    """
4440 9ac99fda Guido Trotter
    pass
4441 9ac99fda Guido Trotter
4442 9ac99fda Guido Trotter
  def Exec(self, feedback_fn):
4443 9ac99fda Guido Trotter
    """Remove any export.
4444 9ac99fda Guido Trotter

4445 9ac99fda Guido Trotter
    """
4446 9ac99fda Guido Trotter
    instance_name = self.cfg.ExpandInstanceName(self.op.instance_name)
4447 9ac99fda Guido Trotter
    # If the instance was not found we'll try with the name that was passed in.
4448 9ac99fda Guido Trotter
    # This will only work if it was an FQDN, though.
4449 9ac99fda Guido Trotter
    fqdn_warn = False
4450 9ac99fda Guido Trotter
    if not instance_name:
4451 9ac99fda Guido Trotter
      fqdn_warn = True
4452 9ac99fda Guido Trotter
      instance_name = self.op.instance_name
4453 9ac99fda Guido Trotter
4454 9ac99fda Guido Trotter
    op = opcodes.OpQueryExports(nodes=[])
4455 9ac99fda Guido Trotter
    exportlist = self.proc.ChainOpCode(op)
4456 9ac99fda Guido Trotter
    found = False
4457 9ac99fda Guido Trotter
    for node in exportlist:
4458 9ac99fda Guido Trotter
      if instance_name in exportlist[node]:
4459 9ac99fda Guido Trotter
        found = True
4460 9ac99fda Guido Trotter
        if not rpc.call_export_remove(node, instance_name):
4461 9ac99fda Guido Trotter
          logger.Error("could not remove export for instance %s"
4462 9ac99fda Guido Trotter
                       " on node %s" % (instance_name, node))
4463 9ac99fda Guido Trotter
4464 9ac99fda Guido Trotter
    if fqdn_warn and not found:
4465 9ac99fda Guido Trotter
      feedback_fn("Export not found. If trying to remove an export belonging"
4466 9ac99fda Guido Trotter
                  " to a deleted instance please use its Fully Qualified"
4467 9ac99fda Guido Trotter
                  " Domain Name.")
4468 9ac99fda Guido Trotter
4469 9ac99fda Guido Trotter
4470 5c947f38 Iustin Pop
class TagsLU(NoHooksLU):
4471 5c947f38 Iustin Pop
  """Generic tags LU.
4472 5c947f38 Iustin Pop

4473 5c947f38 Iustin Pop
  This is an abstract class which is the parent of all the other tags LUs.
4474 5c947f38 Iustin Pop

4475 5c947f38 Iustin Pop
  """
4476 5c947f38 Iustin Pop
  def CheckPrereq(self):
4477 5c947f38 Iustin Pop
    """Check prerequisites.
4478 5c947f38 Iustin Pop

4479 5c947f38 Iustin Pop
    """
4480 5c947f38 Iustin Pop
    if self.op.kind == constants.TAG_CLUSTER:
4481 5c947f38 Iustin Pop
      self.target = self.cfg.GetClusterInfo()
4482 5c947f38 Iustin Pop
    elif self.op.kind == constants.TAG_NODE:
4483 5c947f38 Iustin Pop
      name = self.cfg.ExpandNodeName(self.op.name)
4484 5c947f38 Iustin Pop
      if name is None:
4485 3ecf6786 Iustin Pop
        raise errors.OpPrereqError("Invalid node name (%s)" %
4486 3ecf6786 Iustin Pop
                                   (self.op.name,))
4487 5c947f38 Iustin Pop
      self.op.name = name
4488 5c947f38 Iustin Pop
      self.target = self.cfg.GetNodeInfo(name)
4489 5c947f38 Iustin Pop
    elif self.op.kind == constants.TAG_INSTANCE:
4490 8f684e16 Iustin Pop
      name = self.cfg.ExpandInstanceName(self.op.name)
4491 5c947f38 Iustin Pop
      if name is None:
4492 3ecf6786 Iustin Pop
        raise errors.OpPrereqError("Invalid instance name (%s)" %
4493 3ecf6786 Iustin Pop
                                   (self.op.name,))
4494 5c947f38 Iustin Pop
      self.op.name = name
4495 5c947f38 Iustin Pop
      self.target = self.cfg.GetInstanceInfo(name)
4496 5c947f38 Iustin Pop
    else:
4497 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("Wrong tag type requested (%s)" %
4498 3ecf6786 Iustin Pop
                                 str(self.op.kind))
4499 5c947f38 Iustin Pop
4500 5c947f38 Iustin Pop
4501 5c947f38 Iustin Pop
class LUGetTags(TagsLU):
4502 5c947f38 Iustin Pop
  """Returns the tags of a given object.
4503 5c947f38 Iustin Pop

4504 5c947f38 Iustin Pop
  """
4505 5c947f38 Iustin Pop
  _OP_REQP = ["kind", "name"]
4506 5c947f38 Iustin Pop
4507 5c947f38 Iustin Pop
  def Exec(self, feedback_fn):
4508 5c947f38 Iustin Pop
    """Returns the tag list.
4509 5c947f38 Iustin Pop

4510 5c947f38 Iustin Pop
    """
4511 5d414478 Oleksiy Mishchenko
    return list(self.target.GetTags())
4512 5c947f38 Iustin Pop
4513 5c947f38 Iustin Pop
4514 73415719 Iustin Pop
class LUSearchTags(NoHooksLU):
4515 73415719 Iustin Pop
  """Searches the tags for a given pattern.
4516 73415719 Iustin Pop

4517 73415719 Iustin Pop
  """
4518 73415719 Iustin Pop
  _OP_REQP = ["pattern"]
4519 73415719 Iustin Pop
4520 73415719 Iustin Pop
  def CheckPrereq(self):
4521 73415719 Iustin Pop
    """Check prerequisites.
4522 73415719 Iustin Pop

4523 73415719 Iustin Pop
    This checks the pattern passed for validity by compiling it.
4524 73415719 Iustin Pop

4525 73415719 Iustin Pop
    """
4526 73415719 Iustin Pop
    try:
4527 73415719 Iustin Pop
      self.re = re.compile(self.op.pattern)
4528 73415719 Iustin Pop
    except re.error, err:
4529 73415719 Iustin Pop
      raise errors.OpPrereqError("Invalid search pattern '%s': %s" %
4530 73415719 Iustin Pop
                                 (self.op.pattern, err))
4531 73415719 Iustin Pop
4532 73415719 Iustin Pop
  def Exec(self, feedback_fn):
4533 73415719 Iustin Pop
    """Returns the tag list.
4534 73415719 Iustin Pop

4535 73415719 Iustin Pop
    """
4536 73415719 Iustin Pop
    cfg = self.cfg
4537 73415719 Iustin Pop
    tgts = [("/cluster", cfg.GetClusterInfo())]
4538 73415719 Iustin Pop
    ilist = [cfg.GetInstanceInfo(name) for name in cfg.GetInstanceList()]
4539 73415719 Iustin Pop
    tgts.extend([("/instances/%s" % i.name, i) for i in ilist])
4540 73415719 Iustin Pop
    nlist = [cfg.GetNodeInfo(name) for name in cfg.GetNodeList()]
4541 73415719 Iustin Pop
    tgts.extend([("/nodes/%s" % n.name, n) for n in nlist])
4542 73415719 Iustin Pop
    results = []
4543 73415719 Iustin Pop
    for path, target in tgts:
4544 73415719 Iustin Pop
      for tag in target.GetTags():
4545 73415719 Iustin Pop
        if self.re.search(tag):
4546 73415719 Iustin Pop
          results.append((path, tag))
4547 73415719 Iustin Pop
    return results
4548 73415719 Iustin Pop
4549 73415719 Iustin Pop
4550 f27302fa Iustin Pop
class LUAddTags(TagsLU):
4551 5c947f38 Iustin Pop
  """Sets a tag on a given object.
4552 5c947f38 Iustin Pop

4553 5c947f38 Iustin Pop
  """
4554 f27302fa Iustin Pop
  _OP_REQP = ["kind", "name", "tags"]
4555 5c947f38 Iustin Pop
4556 5c947f38 Iustin Pop
  def CheckPrereq(self):
4557 5c947f38 Iustin Pop
    """Check prerequisites.
4558 5c947f38 Iustin Pop

4559 5c947f38 Iustin Pop
    This checks the type and length of the tag name and value.
4560 5c947f38 Iustin Pop

4561 5c947f38 Iustin Pop
    """
4562 5c947f38 Iustin Pop
    TagsLU.CheckPrereq(self)
4563 f27302fa Iustin Pop
    for tag in self.op.tags:
4564 f27302fa Iustin Pop
      objects.TaggableObject.ValidateTag(tag)
4565 5c947f38 Iustin Pop
4566 5c947f38 Iustin Pop
  def Exec(self, feedback_fn):
4567 5c947f38 Iustin Pop
    """Sets the tag.
4568 5c947f38 Iustin Pop

4569 5c947f38 Iustin Pop
    """
4570 5c947f38 Iustin Pop
    try:
4571 f27302fa Iustin Pop
      for tag in self.op.tags:
4572 f27302fa Iustin Pop
        self.target.AddTag(tag)
4573 5c947f38 Iustin Pop
    except errors.TagError, err:
4574 3ecf6786 Iustin Pop
      raise errors.OpExecError("Error while setting tag: %s" % str(err))
4575 5c947f38 Iustin Pop
    try:
4576 5c947f38 Iustin Pop
      self.cfg.Update(self.target)
4577 5c947f38 Iustin Pop
    except errors.ConfigurationError:
4578 3ecf6786 Iustin Pop
      raise errors.OpRetryError("There has been a modification to the"
4579 3ecf6786 Iustin Pop
                                " config file and the operation has been"
4580 3ecf6786 Iustin Pop
                                " aborted. Please retry.")
4581 5c947f38 Iustin Pop
4582 5c947f38 Iustin Pop
4583 f27302fa Iustin Pop
class LUDelTags(TagsLU):
4584 f27302fa Iustin Pop
  """Delete a list of tags from a given object.
4585 5c947f38 Iustin Pop

4586 5c947f38 Iustin Pop
  """
4587 f27302fa Iustin Pop
  _OP_REQP = ["kind", "name", "tags"]
4588 5c947f38 Iustin Pop
4589 5c947f38 Iustin Pop
  def CheckPrereq(self):
4590 5c947f38 Iustin Pop
    """Check prerequisites.
4591 5c947f38 Iustin Pop

4592 5c947f38 Iustin Pop
    This checks that we have the given tag.
4593 5c947f38 Iustin Pop

4594 5c947f38 Iustin Pop
    """
4595 5c947f38 Iustin Pop
    TagsLU.CheckPrereq(self)
4596 f27302fa Iustin Pop
    for tag in self.op.tags:
4597 f27302fa Iustin Pop
      objects.TaggableObject.ValidateTag(tag)
4598 f27302fa Iustin Pop
    del_tags = frozenset(self.op.tags)
4599 f27302fa Iustin Pop
    cur_tags = self.target.GetTags()
4600 f27302fa Iustin Pop
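    # every tag we were asked to remove must currently be set on the target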
    if not del_tags <= cur_tags:
4601 f27302fa Iustin Pop
      diff_tags = del_tags - cur_tags
4602 f27302fa Iustin Pop
      diff_names = ["'%s'" % tag for tag in diff_tags]
4603 f27302fa Iustin Pop
      diff_names.sort()
4604 f27302fa Iustin Pop
      raise errors.OpPrereqError("Tag(s) %s not found" %
4605 f27302fa Iustin Pop
                                 (",".join(diff_names)))
4606 5c947f38 Iustin Pop
4607 5c947f38 Iustin Pop
  def Exec(self, feedback_fn):
4608 5c947f38 Iustin Pop
    """Remove the tag from the object.
4609 5c947f38 Iustin Pop

4610 5c947f38 Iustin Pop
    """
4611 f27302fa Iustin Pop
    for tag in self.op.tags:
4612 f27302fa Iustin Pop
      self.target.RemoveTag(tag)
4613 5c947f38 Iustin Pop
    try:
4614 5c947f38 Iustin Pop
      self.cfg.Update(self.target)
4615 5c947f38 Iustin Pop
    except errors.ConfigurationError:
4616 3ecf6786 Iustin Pop
      raise errors.OpRetryError("There has been a modification to the"
4617 3ecf6786 Iustin Pop
                                " config file and the operation has been"
4618 3ecf6786 Iustin Pop
                                " aborted. Please retry.")
4619 06009e27 Iustin Pop
4620 0eed6e61 Guido Trotter
4621 06009e27 Iustin Pop
class LUTestDelay(NoHooksLU):
  """Sleep for a specified amount of time.

  This LU sleeps on the master and/or nodes for a specified amount of
  time.

  """
  _OP_REQP = ["duration", "on_master", "on_nodes"]
  REQ_BGL = False

  def ExpandNames(self):
    """Expand names and set required locks.

    This expands the node list, if any.

    """
    self.needed_locks = {}
    if self.op.on_nodes:
      # _GetWantedNodes can be used here, but is not always appropriate to use
      # this way in ExpandNames. Check LogicalUnit.ExpandNames docstring for
      # more information.
      self.op.on_nodes = _GetWantedNodes(self, self.op.on_nodes)
      self.needed_locks[locking.LEVEL_NODE] = self.op.on_nodes

  def CheckPrereq(self):
    """Check prerequisites.

    """

  def Exec(self, feedback_fn):
    """Do the actual sleep.

    """
    if self.op.on_master:
      if not utils.TestDelay(self.op.duration):
        raise errors.OpExecError("Error during master delay test")
    if self.op.on_nodes:
      result = rpc.call_test_delay(self.op.on_nodes, self.op.duration)
      if not result:
        raise errors.OpExecError("Complete failure from rpc call")
      for node, node_result in result.items():
        if not node_result:
          raise errors.OpExecError("Failure during rpc call to node %s,"
                                   " result: %s" % (node, node_result))


class IAllocator(object):
  """IAllocator framework.

  An IAllocator instance has four sets of attributes:
    - cfg/sstore that are needed to query the cluster
    - input data (all members of the mode-specific _ALLO_KEYS or _RELO_KEYS
      class attribute are required)
    - four buffer attributes (in|out_data|text), that represent the
      input (to the external script) in text and data structure format,
      and the output from it, again in two formats
    - the result variables from the script (success, info, nodes) for
      easy usage

  """
  _ALLO_KEYS = [
    "mem_size", "disks", "disk_template",
    "os", "tags", "nics", "vcpus",
    ]
  _RELO_KEYS = [
    "relocate_from",
    ]

  def __init__(self, cfg, sstore, mode, name, **kwargs):
    self.cfg = cfg
    self.sstore = sstore
    # init buffer variables
    self.in_text = self.out_text = self.in_data = self.out_data = None
    # init all input fields so that pylint is happy
    self.mode = mode
    self.name = name
    self.mem_size = self.disks = self.disk_template = None
    self.os = self.tags = self.nics = self.vcpus = None
    self.relocate_from = None
    # computed fields
    self.required_nodes = None
    # init result fields
    self.success = self.info = self.nodes = None
    if self.mode == constants.IALLOCATOR_MODE_ALLOC:
      keyset = self._ALLO_KEYS
    elif self.mode == constants.IALLOCATOR_MODE_RELOC:
      keyset = self._RELO_KEYS
    else:
      raise errors.ProgrammerError("Unknown mode '%s' passed to the"
                                   " IAllocator" % self.mode)
    for key in kwargs:
      if key not in keyset:
        raise errors.ProgrammerError("Invalid input parameter '%s' to"
                                     " IAllocator" % key)
      setattr(self, key, kwargs[key])
    for key in keyset:
      if key not in kwargs:
        raise errors.ProgrammerError("Missing input parameter '%s' to"
                                     " IAllocator" % key)
    self._BuildInputData()

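  # Illustrative only; every value below is made up and nothing here is used
  # by the module. These show the keyword arguments expected for each mode,
  # matching _ALLO_KEYS and _RELO_KEYS above, with disks/nics rows in the
  # same shape that LUTestAllocator.CheckPrereq validates.
  _EXAMPLE_ALLO_KWARGS = {
    "mem_size": 512,
    "disks": [{"size": 1024, "mode": "w"}, {"size": 128, "mode": "w"}],
    "disk_template": "drbd8",
    "os": "debian-etch",
    "tags": [],
    "nics": [{"mac": "auto", "ip": None, "bridge": "xen-br0"}],
    "vcpus": 1,
    }
  _EXAMPLE_RELO_KWARGS = {
    "relocate_from": ["node2.example.com"],
    }
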
  def _ComputeClusterData(self):
    """Compute the generic allocator input data.

    This is the data that is independent of the actual operation.

    """
    cfg = self.cfg
    # cluster data
    data = {
      "version": 1,
      "cluster_name": self.sstore.GetClusterName(),
      "cluster_tags": list(cfg.GetClusterInfo().GetTags()),
      "hypervisor_type": self.sstore.GetHypervisorType(),
      # we don't have job IDs
      }

    i_list = [cfg.GetInstanceInfo(iname) for iname in cfg.GetInstanceList()]

    # node data
    node_results = {}
    node_list = cfg.GetNodeList()
    node_data = rpc.call_node_info(node_list, cfg.GetVGName())
    for nname in node_list:
      ninfo = cfg.GetNodeInfo(nname)
      if nname not in node_data or not isinstance(node_data[nname], dict):
        raise errors.OpExecError("Can't get data for node %s" % nname)
      remote_info = node_data[nname]
      for attr in ['memory_total', 'memory_free', 'memory_dom0',
                   'vg_size', 'vg_free', 'cpu_total']:
        if attr not in remote_info:
          raise errors.OpExecError("Node '%s' didn't return attribute '%s'" %
                                   (nname, attr))
        try:
          remote_info[attr] = int(remote_info[attr])
        except ValueError, err:
          raise errors.OpExecError("Node '%s' returned invalid value for '%s':"
                                   " %s" % (nname, attr, str(err)))
      # compute memory used by primary instances
      i_p_mem = i_p_up_mem = 0
      for iinfo in i_list:
        if iinfo.primary_node == nname:
          i_p_mem += iinfo.memory
          if iinfo.status == "up":
            i_p_up_mem += iinfo.memory

      # compute memory used by instances
      pnr = {
        "tags": list(ninfo.GetTags()),
        "total_memory": remote_info['memory_total'],
        "reserved_memory": remote_info['memory_dom0'],
        "free_memory": remote_info['memory_free'],
        "i_pri_memory": i_p_mem,
        "i_pri_up_memory": i_p_up_mem,
        "total_disk": remote_info['vg_size'],
        "free_disk": remote_info['vg_free'],
        "primary_ip": ninfo.primary_ip,
        "secondary_ip": ninfo.secondary_ip,
        "total_cpus": remote_info['cpu_total'],
        }
      node_results[nname] = pnr
    data["nodes"] = node_results

    # instance data
    instance_data = {}
    for iinfo in i_list:
      nic_data = [{"mac": n.mac, "ip": n.ip, "bridge": n.bridge}
                  for n in iinfo.nics]
      pir = {
        "tags": list(iinfo.GetTags()),
        "should_run": iinfo.status == "up",
        "vcpus": iinfo.vcpus,
        "memory": iinfo.memory,
        "os": iinfo.os,
        "nodes": [iinfo.primary_node] + list(iinfo.secondary_nodes),
        "nics": nic_data,
        "disks": [{"size": dsk.size, "mode": "w"} for dsk in iinfo.disks],
        "disk_template": iinfo.disk_template,
        }
      instance_data[iinfo.name] = pir

    data["instances"] = instance_data

    self.in_data = data

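  # Illustrative only (all values made up): the shape of one entry in
  # data["nodes"] as built by _ComputeClusterData above.
  _EXAMPLE_NODE_ENTRY = {
    "tags": [],
    "total_memory": 4096,
    "reserved_memory": 512,
    "free_memory": 2048,
    "i_pri_memory": 1536,
    "i_pri_up_memory": 1024,
    "total_disk": 204800,
    "free_disk": 102400,
    "primary_ip": "192.0.2.1",
    "secondary_ip": "198.51.100.1",
    "total_cpus": 4,
    }
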
  def _AddNewInstance(self):
    """Add new instance data to allocator structure.

    This in combination with _ComputeClusterData will create the
    correct structure needed as input for the allocator.

    The checks for the completeness of the opcode must have already been
    done.

    """
    data = self.in_data
    if len(self.disks) != 2:
      raise errors.OpExecError("Only two-disk configurations supported")

    disk_space = _ComputeDiskSize(self.disk_template,
                                  self.disks[0]["size"], self.disks[1]["size"])

    if self.disk_template in constants.DTS_NET_MIRROR:
      self.required_nodes = 2
    else:
      self.required_nodes = 1
    request = {
      "type": "allocate",
      "name": self.name,
      "disk_template": self.disk_template,
      "tags": self.tags,
      "os": self.os,
      "vcpus": self.vcpus,
      "memory": self.mem_size,
      "disks": self.disks,
      "disk_space_total": disk_space,
      "nics": self.nics,
      "required_nodes": self.required_nodes,
      }
    data["request"] = request

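  # Illustrative only (made-up values): the request document built above, as
  # it ends up under data["request"]; disk_space_total comes from
  # _ComputeDiskSize and required_nodes is 2 for network-mirrored templates.
  _EXAMPLE_ALLOCATE_REQUEST = {
    "type": "allocate",
    "name": "instance1.example.com",
    "disk_template": "drbd8",
    "tags": [],
    "os": "debian-etch",
    "vcpus": 1,
    "memory": 512,
    "disks": [{"size": 1024, "mode": "w"}, {"size": 128, "mode": "w"}],
    "disk_space_total": 1280,
    "nics": [{"mac": "auto", "ip": None, "bridge": "xen-br0"}],
    "required_nodes": 2,
    }
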
  def _AddRelocateInstance(self):
    """Add relocate instance data to allocator structure.

    This in combination with _ComputeClusterData will create the
    correct structure needed as input for the allocator.

    The checks for the completeness of the opcode must have already been
    done.

    """
    instance = self.cfg.GetInstanceInfo(self.name)
    if instance is None:
      raise errors.ProgrammerError("Unknown instance '%s' passed to"
                                   " IAllocator" % self.name)

    if instance.disk_template not in constants.DTS_NET_MIRROR:
      raise errors.OpPrereqError("Can't relocate non-mirrored instances")

    if len(instance.secondary_nodes) != 1:
      raise errors.OpPrereqError("Instance does not have exactly one"
                                 " secondary node")

    self.required_nodes = 1

    disk_space = _ComputeDiskSize(instance.disk_template,
                                  instance.disks[0].size,
                                  instance.disks[1].size)

    request = {
      "type": "relocate",
      "name": self.name,
      "disk_space_total": disk_space,
      "required_nodes": self.required_nodes,
      "relocate_from": self.relocate_from,
      }
    self.in_data["request"] = request

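  # Illustrative only (made-up values): the matching relocate request;
  # relocate_from lists the node(s) the instance must be moved away from.
  _EXAMPLE_RELOCATE_REQUEST = {
    "type": "relocate",
    "name": "instance1.example.com",
    "disk_space_total": 1280,
    "required_nodes": 1,
    "relocate_from": ["node2.example.com"],
    }
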
  def _BuildInputData(self):
    """Build input data structures.

    """
    self._ComputeClusterData()

    if self.mode == constants.IALLOCATOR_MODE_ALLOC:
      self._AddNewInstance()
    else:
      self._AddRelocateInstance()

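    # in_text is the serialized form of in_data; it is what actually gets
    # handed to the external allocator script.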
    self.in_text = serializer.Dump(self.in_data)

  def Run(self, name, validate=True, call_fn=rpc.call_iallocator_runner):
    """Run an instance allocator and return the results.

    """
    data = self.in_text

    result = call_fn(self.sstore.GetMasterNode(), name, self.in_text)

    if not isinstance(result, tuple) or len(result) != 4:
      raise errors.OpExecError("Invalid result from master iallocator runner")

    rcode, stdout, stderr, fail = result

    if rcode == constants.IARUN_NOTFOUND:
      raise errors.OpExecError("Can't find allocator '%s'" % name)
    elif rcode == constants.IARUN_FAILURE:
      raise errors.OpExecError("Instance allocator call failed: %s,"
                               " output: %s" %
                               (fail, stdout+stderr))
    self.out_text = stdout
    if validate:
      self._ValidateResult()

  def _ValidateResult(self):
    """Process the allocator results.

    This will process and, if successful, save the result in
    self.out_data and in the individual result attributes (success, info,
    nodes).

    """
    try:
      rdict = serializer.Load(self.out_text)
    except Exception, err:
      raise errors.OpExecError("Can't parse iallocator results: %s" % str(err))

    if not isinstance(rdict, dict):
      raise errors.OpExecError("Can't parse iallocator results: not a dict")

    for key in "success", "info", "nodes":
      if key not in rdict:
        raise errors.OpExecError("Can't parse iallocator results:"
                                 " missing key '%s'" % key)
      setattr(self, key, rdict[key])

    if not isinstance(rdict["nodes"], list):
      raise errors.OpExecError("Can't parse iallocator results: 'nodes' key"
                               " is not a list")
    self.out_data = rdict


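# Illustrative sketch only (made-up values, not used by this code): the
# minimal result document an external allocator script must return, as
# checked by IAllocator._ValidateResult above - a dict with at least the
# "success", "info" and "nodes" keys, the last one being a list of node
# names.
_EXAMPLE_IALLOCATOR_RESULT = {
  "success": True,
  "info": "allocation successful",
  "nodes": ["node1.example.com", "node2.example.com"],
  }

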
class LUTestAllocator(NoHooksLU):
  """Run allocator tests.

  This LU runs the allocator tests.

  """
  _OP_REQP = ["direction", "mode", "name"]

  def CheckPrereq(self):
    """Check prerequisites.

    This checks the opcode parameters depending on the direction and mode
    of the test.

    """
    if self.op.mode == constants.IALLOCATOR_MODE_ALLOC:
      for attr in ["name", "mem_size", "disks", "disk_template",
                   "os", "tags", "nics", "vcpus"]:
        if not hasattr(self.op, attr):
          raise errors.OpPrereqError("Missing attribute '%s' on opcode input" %
                                     attr)
      iname = self.cfg.ExpandInstanceName(self.op.name)
      if iname is not None:
        raise errors.OpPrereqError("Instance '%s' already in the cluster" %
                                   iname)
      if not isinstance(self.op.nics, list):
        raise errors.OpPrereqError("Invalid parameter 'nics'")
      for row in self.op.nics:
        if (not isinstance(row, dict) or
            "mac" not in row or
            "ip" not in row or
            "bridge" not in row):
          raise errors.OpPrereqError("Invalid contents of the"
                                     " 'nics' parameter")
      if not isinstance(self.op.disks, list):
        raise errors.OpPrereqError("Invalid parameter 'disks'")
      if len(self.op.disks) != 2:
        raise errors.OpPrereqError("Only two-disk configurations supported")
      for row in self.op.disks:
        if (not isinstance(row, dict) or
            "size" not in row or
            not isinstance(row["size"], int) or
            "mode" not in row or
            row["mode"] not in ['r', 'w']):
          raise errors.OpPrereqError("Invalid contents of the"
                                     " 'disks' parameter")
    elif self.op.mode == constants.IALLOCATOR_MODE_RELOC:
      if not hasattr(self.op, "name"):
        raise errors.OpPrereqError("Missing attribute 'name' on opcode input")
      fname = self.cfg.ExpandInstanceName(self.op.name)
      if fname is None:
        raise errors.OpPrereqError("Instance '%s' not found for relocation" %
                                   self.op.name)
      self.op.name = fname
      self.relocate_from = self.cfg.GetInstanceInfo(fname).secondary_nodes
    else:
      raise errors.OpPrereqError("Invalid test allocator mode '%s'" %
                                 self.op.mode)

    if self.op.direction == constants.IALLOCATOR_DIR_OUT:
      if not hasattr(self.op, "allocator") or self.op.allocator is None:
        raise errors.OpPrereqError("Missing allocator name")
    elif self.op.direction != constants.IALLOCATOR_DIR_IN:
      raise errors.OpPrereqError("Wrong allocator test '%s'" %
                                 self.op.direction)

  def Exec(self, feedback_fn):
    """Run the allocator test.

    """
    if self.op.mode == constants.IALLOCATOR_MODE_ALLOC:
      ial = IAllocator(self.cfg, self.sstore,
                       mode=self.op.mode,
                       name=self.op.name,
                       mem_size=self.op.mem_size,
                       disks=self.op.disks,
                       disk_template=self.op.disk_template,
                       os=self.op.os,
                       tags=self.op.tags,
                       nics=self.op.nics,
                       vcpus=self.op.vcpus,
                       )
    else:
      ial = IAllocator(self.cfg, self.sstore,
                       mode=self.op.mode,
                       name=self.op.name,
                       relocate_from=list(self.relocate_from),
                       )

    if self.op.direction == constants.IALLOCATOR_DIR_IN:
      result = ial.in_text
    else:
      ial.Run(self.op.allocator, validate=False)
      result = ial.out_text
    return result