Statistics
| Branch: | Tag: | Revision:

root / lib / cmdlib.py @ e310b019

History | View | Annotate | Download (179.9 kB)

1 2f31098c Iustin Pop
#
2 a8083063 Iustin Pop
#
3 a8083063 Iustin Pop
4 e7c6e02b Michael Hanselmann
# Copyright (C) 2006, 2007, 2008 Google Inc.
5 a8083063 Iustin Pop
#
6 a8083063 Iustin Pop
# This program is free software; you can redistribute it and/or modify
7 a8083063 Iustin Pop
# it under the terms of the GNU General Public License as published by
8 a8083063 Iustin Pop
# the Free Software Foundation; either version 2 of the License, or
9 a8083063 Iustin Pop
# (at your option) any later version.
10 a8083063 Iustin Pop
#
11 a8083063 Iustin Pop
# This program is distributed in the hope that it will be useful, but
12 a8083063 Iustin Pop
# WITHOUT ANY WARRANTY; without even the implied warranty of
13 a8083063 Iustin Pop
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
14 a8083063 Iustin Pop
# General Public License for more details.
15 a8083063 Iustin Pop
#
16 a8083063 Iustin Pop
# You should have received a copy of the GNU General Public License
17 a8083063 Iustin Pop
# along with this program; if not, write to the Free Software
18 a8083063 Iustin Pop
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
19 a8083063 Iustin Pop
# 02110-1301, USA.
20 a8083063 Iustin Pop
21 a8083063 Iustin Pop
22 880478f8 Iustin Pop
"""Module implementing the master-side code."""
23 a8083063 Iustin Pop
24 a8083063 Iustin Pop
# pylint: disable-msg=W0613,W0201
25 a8083063 Iustin Pop
26 a8083063 Iustin Pop
import os
27 a8083063 Iustin Pop
import os.path
28 a8083063 Iustin Pop
import sha
29 a8083063 Iustin Pop
import time
30 a8083063 Iustin Pop
import tempfile
31 a8083063 Iustin Pop
import re
32 a8083063 Iustin Pop
import platform
33 a8083063 Iustin Pop
34 a8083063 Iustin Pop
from ganeti import rpc
35 a8083063 Iustin Pop
from ganeti import ssh
36 a8083063 Iustin Pop
from ganeti import logger
37 a8083063 Iustin Pop
from ganeti import utils
38 a8083063 Iustin Pop
from ganeti import errors
39 a8083063 Iustin Pop
from ganeti import hypervisor
40 6048c986 Guido Trotter
from ganeti import locking
41 a8083063 Iustin Pop
from ganeti import constants
42 a8083063 Iustin Pop
from ganeti import objects
43 a8083063 Iustin Pop
from ganeti import opcodes
44 8d14b30d Iustin Pop
from ganeti import serializer
45 d61df03e Iustin Pop
46 d61df03e Iustin Pop
47 a8083063 Iustin Pop
class LogicalUnit(object):
  """Logical Unit base class.

  Subclasses must follow these rules:
    - implement ExpandNames
    - implement CheckPrereq
    - implement Exec
    - implement BuildHooksEnv
    - redefine HPATH and HTYPE
    - optionally redefine their run requirements:
        REQ_MASTER: the LU needs to run on the master node
        REQ_WSSTORE: the LU needs a writable SimpleStore
        REQ_BGL: the LU needs to hold the Big Ganeti Lock exclusively

  Note that all commands require root permissions.

  """
  HPATH = None
  HTYPE = None
  _OP_REQP = []
  REQ_MASTER = True
  REQ_WSSTORE = False
  REQ_BGL = True

  def __init__(self, processor, op, context, sstore):
    """Constructor for LogicalUnit.

    Verifies that all parameters listed in _OP_REQP are present on the
    opcode, that the cluster is initialized, and (when REQ_MASTER is
    set) that we are running on the master node. Derived classes
    override this to add further opcode validity checks.

    """
    self.proc = processor
    self.op = op
    self.cfg = context.cfg
    self.sstore = sstore
    self.context = context
    # Locking state, filled in by ExpandNames/DeclareLocks and the processor
    self.needed_locks = None
    self.acquired_locks = {}
    self.share_locks = dict.fromkeys(locking.LEVELS, 0)
    # Used to force good behavior when calling helper functions
    self.recalculate_locks = {}
    self.__ssh = None

    missing = [name for name in self._OP_REQP
               if getattr(op, name, None) is None]
    if missing:
      raise errors.OpPrereqError("Required parameter '%s' missing" %
                                 missing[0])

    if not self.cfg.IsCluster():
      raise errors.OpPrereqError("Cluster not initialized yet,"
                                 " use 'gnt-cluster init' first.")
    if self.REQ_MASTER:
      master = sstore.GetMasterNode()
      if master != utils.HostInfo().name:
        raise errors.OpPrereqError("Commands must be run on the master"
                                   " node %s" % master)

  def __GetSSH(self):
    """Return the SshRunner object, creating it lazily on first use.

    """
    if not self.__ssh:
      self.__ssh = ssh.SshRunner(self.sstore)
    return self.__ssh

  ssh = property(fget=__GetSSH)

  def ExpandNames(self):
    """Expand names for this LU.

    Called before the opcode starts executing; must bring all opcode
    parameters into canonical form (e.g. a short node name must be
    fully expanded once this method completes) so that locking, hooks,
    logging, etc. work correctly afterwards.

    LUs which implement this method must also populate the
    self.needed_locks member, a dict with lock levels as keys and
    lists of needed lock names as values. Rules:
      - use an empty dict if you don't need any lock
      - omit a level entirely if you need no lock at that level
      - don't put anything for the BGL level
      - use locking.ALL_SET as the value to take all locks at a level

    If you need to share locks (rather than acquire them exclusively)
    at one level you can modify self.share_locks, setting a true value
    (usually 1) for that level. By default locks are not shared.

    Examples:
    # Acquire all nodes and one instance
    self.needed_locks = {
      locking.LEVEL_NODE: locking.ALL_SET,
      locking.LEVEL_INSTANCES: ['instance1.example.tld'],
    }
    # Acquire just two nodes
    self.needed_locks = {
      locking.LEVEL_NODE: ['node1.example.tld', 'node2.example.tld'],
    }
    # Acquire no locks
    self.needed_locks = {} # No, you can't leave it to the default value None

    """
    # Implementing this method is mandatory only for concurrent LUs, so
    # that the old (big-lock) LUs don't all have to be converted at the
    # same time.
    if not self.REQ_BGL:
      raise NotImplementedError
    self.needed_locks = {} # Exclusive LUs don't need locks.

  def DeclareLocks(self, level):
    """Declare LU locking needs for a level

    Most LUs can declare their locking needs at ExpandNames time, but
    sometimes locks at one level can only be calculated after the ones
    at lower levels have been acquired. This hook is invoked just
    before locking a particular level (and after locking all lower
    ones) and may modify self.needed_locks; by default it does nothing.

    It is only called if self.needed_locks already has an entry for the
    level.

    @param level: Locking level which is going to be locked
    @type level: member of ganeti.locking.LEVELS

    """

  def CheckPrereq(self):
    """Check prerequisites for this LU.

    Verifies that the prerequisites for executing this LU are
    fulfilled. May talk to other nodes, but must be idempotent - no
    cluster or system changes are allowed.

    Raises errors.OpPrereqError if something is not fulfilled; the
    return value is ignored. Should also canonicalize any opcode
    parameters that ExpandNames did not already handle.

    """
    raise NotImplementedError

  def Exec(self, feedback_fn):
    """Execute the LU.

    Implements the actual work. Raises errors.OpExecError for failures
    that are somewhat dealt with in code, or expected.

    """
    raise NotImplementedError

  def BuildHooksEnv(self):
    """Build hooks environment for this LU.

    Returns a three-element tuple: the dict of environment variables
    for the LU-specific hook, the list of node names on which the hook
    runs before execution, and the list of node names on which it runs
    after execution.

    Keys in the dict must not carry the 'GANETI_' prefix - the hooks
    runner adds it (plus additional keys of its own). An LU with no
    environment should return an empty dict, never None; likewise
    empty node lists, never None.

    Not called when the LU class has HPATH set to None.

    """
    raise NotImplementedError

  def HooksCallBack(self, phase, hook_results, feedback_fn, lu_result):
    """Notify the LU about the results of its hooks.

    Invoked after every hooks phase so the Logical Unit can adjust its
    result based on the hooks' outcome. The default implementation
    passes the previous result back unchanged; LUs that want to react
    to the local cluster hook-scripts can override it.

    Args:
      phase: the hooks phase that has just been run
      hooks_results: the results of the multi-node hooks rpc call
      feedback_fn: function to send feedback back to the caller
      lu_result: the previous result this LU had, or None in the PRE phase.

    """
    return lu_result

  def _ExpandAndLockInstance(self):
    """Helper function to expand and lock an instance.

    Many LUs that operate on one instance receive its name in
    self.op.instance_name. This helper expands that name, declares the
    expanded name for instance-level locking, and writes it back to
    self.op.instance_name. It also turns needed_locks into a dict if
    that hasn't happened yet.

    """
    if self.needed_locks is None:
      self.needed_locks = {}
    else:
      assert locking.LEVEL_INSTANCE not in self.needed_locks, \
        "_ExpandAndLockInstance called with instance-level locks set"
    full_name = self.cfg.ExpandInstanceName(self.op.instance_name)
    if full_name is None:
      raise errors.OpPrereqError("Instance '%s' not known" %
                                  self.op.instance_name)
    self.needed_locks[locking.LEVEL_INSTANCE] = full_name
    self.op.instance_name = full_name

  def _LockInstancesNodes(self):
    """Helper function to declare instances' nodes for locking.

    Call this after locking one or more instances to lock their nodes:
    it fills self.needed_locks[locking.LEVEL_NODE] with every primary
    and secondary node of the instances currently present in
    self.acquired_locks[locking.LEVEL_INSTANCE].

    Should be called from DeclareLocks; as a safety net it only works
    when self.recalculate_locks[locking.LEVEL_NODE] is set.

    In the future it may grow parameters to lock only some instances'
    nodes, or just primaries or secondaries.

    Typical usage in DeclareLocks:

    if level == locking.LEVEL_NODE:
      self._LockInstancesNodes()

    """
    assert locking.LEVEL_NODE in self.recalculate_locks, \
      "_LockInstancesNodes helper function called with no nodes to recalculate"

    # TODO: check if we're really been called with the instance locks held

    # For now we'll replace self.needed_locks[locking.LEVEL_NODE], but in the
    # future we might want to have different behaviors depending on the value
    # of self.recalculate_locks[locking.LEVEL_NODE]
    nodes = []
    for inst_name in self.acquired_locks[locking.LEVEL_INSTANCE]:
      inst = self.context.cfg.GetInstanceInfo(inst_name)
      nodes.append(inst.primary_node)
      nodes.extend(inst.secondary_nodes)
    self.needed_locks[locking.LEVEL_NODE] = nodes

    del self.recalculate_locks[locking.LEVEL_NODE]
300 c4a2fee1 Guido Trotter
301 a8083063 Iustin Pop
302 a8083063 Iustin Pop
class NoHooksLU(LogicalUnit):
  """Simple LU which runs no hooks.

  Parent class for LogicalUnits that run no hooks; having HPATH and
  HTYPE set to None here avoids repeating that in every subclass.

  """
  HPATH = None
  HTYPE = None
311 a8083063 Iustin Pop
312 a8083063 Iustin Pop
313 dcb93971 Michael Hanselmann
def _GetWantedNodes(lu, nodes):
  """Returns list of checked and expanded node names.

  Args:
    nodes: non-empty list of node names (strings) to expand

  Raises errors.OpPrereqError if the argument is not a list or a name
  cannot be expanded, errors.ProgrammerError if the list is empty.

  """
  if not isinstance(nodes, list):
    raise errors.OpPrereqError("Invalid argument type 'nodes'")

  if not nodes:
    raise errors.ProgrammerError("_GetWantedNodes should only be called with a"
      " non-empty list of nodes whose name is to be expanded.")

  expanded = []
  for node_name in nodes:
    full_name = lu.cfg.ExpandNodeName(node_name)
    if full_name is None:
      raise errors.OpPrereqError("No such node name '%s'" % node_name)
    expanded.append(full_name)

  return utils.NiceSort(expanded)
335 3312b702 Iustin Pop
336 3312b702 Iustin Pop
337 3312b702 Iustin Pop
def _GetWantedInstances(lu, instances):
  """Returns list of checked and expanded instance names.

  Args:
    instances: List of instances (strings) or None for all

  An empty (or None) argument selects every instance in the cluster;
  otherwise each name is expanded and checked for existence.

  """
  if not isinstance(instances, list):
    raise errors.OpPrereqError("Invalid argument type 'instances'")

  if not instances:
    # empty selection means "all instances"
    wanted = lu.cfg.GetInstanceList()
  else:
    wanted = []
    for inst_name in instances:
      full_name = lu.cfg.ExpandInstanceName(inst_name)
      if full_name is None:
        raise errors.OpPrereqError("No such instance name '%s'" % inst_name)
      wanted.append(full_name)

  return utils.NiceSort(wanted)
359 dcb93971 Michael Hanselmann
360 dcb93971 Michael Hanselmann
361 dcb93971 Michael Hanselmann
def _CheckOutputFields(static, dynamic, selected):
362 83120a01 Michael Hanselmann
  """Checks whether all selected fields are valid.
363 83120a01 Michael Hanselmann

364 83120a01 Michael Hanselmann
  Args:
365 83120a01 Michael Hanselmann
    static: Static fields
366 83120a01 Michael Hanselmann
    dynamic: Dynamic fields
367 83120a01 Michael Hanselmann

368 83120a01 Michael Hanselmann
  """
369 83120a01 Michael Hanselmann
  static_fields = frozenset(static)
370 83120a01 Michael Hanselmann
  dynamic_fields = frozenset(dynamic)
371 dcb93971 Michael Hanselmann
372 83120a01 Michael Hanselmann
  all_fields = static_fields | dynamic_fields
373 dcb93971 Michael Hanselmann
374 83120a01 Michael Hanselmann
  if not all_fields.issuperset(selected):
375 3ecf6786 Iustin Pop
    raise errors.OpPrereqError("Unknown output fields selected: %s"
376 3ecf6786 Iustin Pop
                               % ",".join(frozenset(selected).
377 3ecf6786 Iustin Pop
                                          difference(all_fields)))
378 dcb93971 Michael Hanselmann
379 dcb93971 Michael Hanselmann
380 ecb215b5 Michael Hanselmann
def _BuildInstanceHookEnv(name, primary_node, secondary_nodes, os_type, status,
381 396e1b78 Michael Hanselmann
                          memory, vcpus, nics):
382 ecb215b5 Michael Hanselmann
  """Builds instance related env variables for hooks from single variables.
383 ecb215b5 Michael Hanselmann

384 ecb215b5 Michael Hanselmann
  Args:
385 ecb215b5 Michael Hanselmann
    secondary_nodes: List of secondary nodes as strings
386 396e1b78 Michael Hanselmann
  """
387 396e1b78 Michael Hanselmann
  env = {
388 0e137c28 Iustin Pop
    "OP_TARGET": name,
389 396e1b78 Michael Hanselmann
    "INSTANCE_NAME": name,
390 396e1b78 Michael Hanselmann
    "INSTANCE_PRIMARY": primary_node,
391 396e1b78 Michael Hanselmann
    "INSTANCE_SECONDARIES": " ".join(secondary_nodes),
392 ecb215b5 Michael Hanselmann
    "INSTANCE_OS_TYPE": os_type,
393 396e1b78 Michael Hanselmann
    "INSTANCE_STATUS": status,
394 396e1b78 Michael Hanselmann
    "INSTANCE_MEMORY": memory,
395 396e1b78 Michael Hanselmann
    "INSTANCE_VCPUS": vcpus,
396 396e1b78 Michael Hanselmann
  }
397 396e1b78 Michael Hanselmann
398 396e1b78 Michael Hanselmann
  if nics:
399 396e1b78 Michael Hanselmann
    nic_count = len(nics)
400 53e4e875 Guido Trotter
    for idx, (ip, bridge, mac) in enumerate(nics):
401 396e1b78 Michael Hanselmann
      if ip is None:
402 396e1b78 Michael Hanselmann
        ip = ""
403 396e1b78 Michael Hanselmann
      env["INSTANCE_NIC%d_IP" % idx] = ip
404 396e1b78 Michael Hanselmann
      env["INSTANCE_NIC%d_BRIDGE" % idx] = bridge
405 53e4e875 Guido Trotter
      env["INSTANCE_NIC%d_HWADDR" % idx] = mac
406 396e1b78 Michael Hanselmann
  else:
407 396e1b78 Michael Hanselmann
    nic_count = 0
408 396e1b78 Michael Hanselmann
409 396e1b78 Michael Hanselmann
  env["INSTANCE_NIC_COUNT"] = nic_count
410 396e1b78 Michael Hanselmann
411 396e1b78 Michael Hanselmann
  return env
412 396e1b78 Michael Hanselmann
413 396e1b78 Michael Hanselmann
414 396e1b78 Michael Hanselmann
def _BuildInstanceHookEnvByObject(instance, override=None):
  """Builds instance related env variables for hooks from an object.

  Args:
    instance: objects.Instance object of instance
    override: dict of values to override

  Returns the environment dict produced by _BuildInstanceHookEnv.

  """
  args = {
    'name': instance.name,
    'primary_node': instance.primary_node,
    'secondary_nodes': instance.secondary_nodes,
    'os_type': instance.os,
    # FIX: this previously read instance.os (a copy of the line above),
    # which exported the OS name as INSTANCE_STATUS instead of the
    # instance's run status.
    'status': instance.status,
    'memory': instance.memory,
    'vcpus': instance.vcpus,
    'nics': [(nic.ip, nic.bridge, nic.mac) for nic in instance.nics],
  }
  if override:
    args.update(override)
  return _BuildInstanceHookEnv(**args)
434 396e1b78 Michael Hanselmann
435 396e1b78 Michael Hanselmann
436 bf6929a2 Alexander Schreiber
def _CheckInstanceBridgesExist(instance):
  """Check that the bridges needed by an instance exist.

  Queries the instance's primary node via rpc and raises
  errors.OpPrereqError if any of the bridges used by its nics is
  missing there.

  """
  bridges = [nic.bridge for nic in instance.nics]
  if not rpc.call_bridges_exist(instance.primary_node, bridges):
    raise errors.OpPrereqError("one or more target bridges %s does not"
                               " exist on destination node '%s'" %
                               (bridges, instance.primary_node))
446 bf6929a2 Alexander Schreiber
447 bf6929a2 Alexander Schreiber
448 a8083063 Iustin Pop
class LUDestroyCluster(NoHooksLU):
  """Logical unit for destroying the cluster.

  """
  _OP_REQP = []

  def CheckPrereq(self):
    """Check prerequisites.

    The cluster must be empty: no instances at all, and no node other
    than the master itself.

    Any errors are signalled by raising errors.OpPrereqError.

    """
    master = self.sstore.GetMasterNode()

    nodes = self.cfg.GetNodeList()
    if len(nodes) != 1 or nodes[0] != master:
      raise errors.OpPrereqError("There are still %d node(s) in"
                                 " this cluster." % (len(nodes) - 1))
    instances = self.cfg.GetInstanceList()
    if instances:
      raise errors.OpPrereqError("There are still %d instance(s) in"
                                 " this cluster." % len(instances))

  def Exec(self, feedback_fn):
    """Destroys the cluster.

    Turns off the master role on the master node, backs up the ganeti
    ssh keys, and returns the master node's name to the caller.

    """
    master = self.sstore.GetMasterNode()
    if not rpc.call_node_stop_master(master, False):
      raise errors.OpExecError("Could not disable the master role")
    priv_key, pub_key, _ = ssh.GetUserFiles(constants.GANETI_RUNAS)
    # keep a copy of the ssh keys before the cluster state goes away
    utils.CreateBackup(priv_key)
    utils.CreateBackup(pub_key)
    return master
484 a8083063 Iustin Pop
485 a8083063 Iustin Pop
486 d8fff41c Guido Trotter
class LUVerifyCluster(LogicalUnit):
  """Verifies the cluster status.

  Runs a series of consistency checks across all nodes (software
  version, volume group, config file checksums, ssh/tcp connectivity),
  verifies instance placement and volumes, reports orphan volumes and
  instances, and optionally checks N+1 memory redundancy.  Problems are
  reported through the feedback function; Exec returns True only if no
  problem was found.

  """
  HPATH = "cluster-verify"
  HTYPE = constants.HTYPE_CLUSTER
  # the opcode must carry the list of optional checks to skip
  _OP_REQP = ["skip_checks"]

  def _VerifyNode(self, node, file_list, local_cksum, vglist, node_result,
                  remote_version, feedback_fn):
    """Run multiple tests against a node.

    Test list:
      - compares ganeti version
      - checks vg existence and size > 20G
      - checks config file checksum
      - checks ssh to other nodes

    Args:
      node: name of the node to check
      file_list: required list of files
      local_cksum: dictionary of local files and their checksums
      vglist: volume group data as returned by the node (false-ish on
        failure)
      node_result: dict with the node's answers to the verify RPC
      remote_version: protocol version reported by the node (false-ish
        if the node could not be contacted)
      feedback_fn: function used to report each problem

    Returns:
      True if any problem was found on this node, False otherwise.

    """
    # compares ganeti version
    local_version = constants.PROTOCOL_VERSION
    if not remote_version:
      feedback_fn("  - ERROR: connection to %s failed" % (node))
      return True

    if local_version != remote_version:
      feedback_fn("  - ERROR: sw version mismatch: master %s, node(%s) %s" %
                      (local_version, node, remote_version))
      return True

    # checks vg existence and size > 20G

    bad = False
    if not vglist:
      feedback_fn("  - ERROR: unable to check volume groups on node %s." %
                      (node,))
      bad = True
    else:
      vgstatus = utils.CheckVolumeGroupSize(vglist, self.cfg.GetVGName(),
                                            constants.MIN_VG_SIZE)
      if vgstatus:
        feedback_fn("  - ERROR: %s on node %s" % (vgstatus, node))
        bad = True

    # checks config file checksum
    # checks ssh to any

    if 'filelist' not in node_result:
      bad = True
      feedback_fn("  - ERROR: node hasn't returned file checksum data")
    else:
      remote_cksum = node_result['filelist']
      for file_name in file_list:
        if file_name not in remote_cksum:
          bad = True
          feedback_fn("  - ERROR: file '%s' missing" % file_name)
        elif remote_cksum[file_name] != local_cksum[file_name]:
          bad = True
          feedback_fn("  - ERROR: file '%s' has wrong checksum" % file_name)

    if 'nodelist' not in node_result:
      bad = True
      feedback_fn("  - ERROR: node hasn't returned node ssh connectivity data")
    else:
      if node_result['nodelist']:
        bad = True
        # NOTE(review): this loop shadows the 'node' argument; messages
        # below refer to the remote peer, not the node being verified
        for node in node_result['nodelist']:
          feedback_fn("  - ERROR: ssh communication with node '%s': %s" %
                          (node, node_result['nodelist'][node]))
    if 'node-net-test' not in node_result:
      bad = True
      feedback_fn("  - ERROR: node hasn't returned node tcp connectivity data")
    else:
      if node_result['node-net-test']:
        bad = True
        nlist = utils.NiceSort(node_result['node-net-test'].keys())
        # NOTE(review): 'node' is shadowed here as well
        for node in nlist:
          feedback_fn("  - ERROR: tcp communication with node '%s': %s" %
                          (node, node_result['node-net-test'][node]))

    hyp_result = node_result.get('hypervisor', None)
    if hyp_result is not None:
      feedback_fn("  - ERROR: hypervisor verify failure: '%s'" % hyp_result)
    return bad

  def _VerifyInstance(self, instance, instanceconfig, node_vol_is,
                      node_instance, feedback_fn):
    """Verify an instance.

    This function checks to see if the required block devices are
    available on the instance's node, that the instance is running on
    its primary node (unless marked down), and that it is not running
    anywhere else.

    Args:
      instance: instance name
      instanceconfig: the instance's configuration object
      node_vol_is: dict node -> actual volumes present on that node
      node_instance: dict node -> list of instances running there
      feedback_fn: function used to report each problem

    Returns:
      True if any problem was found, False otherwise.

    """
    bad = False

    node_current = instanceconfig.primary_node

    node_vol_should = {}
    instanceconfig.MapLVsByNode(node_vol_should)

    # every LV the config says should exist must actually be present
    for node in node_vol_should:
      for volume in node_vol_should[node]:
        if node not in node_vol_is or volume not in node_vol_is[node]:
          feedback_fn("  - ERROR: volume %s missing on node %s" %
                          (volume, node))
          bad = True

    # unless administratively down, the instance must run on its primary
    if not instanceconfig.status == 'down':
      if (node_current not in node_instance or
          not instance in node_instance[node_current]):
        feedback_fn("  - ERROR: instance %s not running on node %s" %
                        (instance, node_current))
        bad = True

    # ... and nowhere else
    for node in node_instance:
      if (not node == node_current):
        if instance in node_instance[node]:
          feedback_fn("  - ERROR: instance %s should not run on node %s" %
                          (instance, node))
          bad = True

    return bad

  def _VerifyOrphanVolumes(self, node_vol_should, node_vol_is, feedback_fn):
    """Verify if there are any unknown volumes in the cluster.

    The .os, .swap and backup volumes are ignored. All other volumes are
    reported as unknown.

    Returns:
      True if any orphan volume was found, False otherwise.

    """
    bad = False

    for node in node_vol_is:
      for volume in node_vol_is[node]:
        if node not in node_vol_should or volume not in node_vol_should[node]:
          feedback_fn("  - ERROR: volume %s on node %s should not exist" %
                      (volume, node))
          bad = True
    return bad

  def _VerifyOrphanInstances(self, instancelist, node_instance, feedback_fn):
    """Verify the list of running instances.

    This checks what instances are running but unknown to the cluster.

    Returns:
      True if any orphan instance was found, False otherwise.

    """
    bad = False
    for node in node_instance:
      for runninginstance in node_instance[node]:
        if runninginstance not in instancelist:
          feedback_fn("  - ERROR: instance %s on node %s should not exist" %
                          (runninginstance, node))
          bad = True
    return bad

  def _VerifyNPlusOneMemory(self, node_info, instance_cfg, feedback_fn):
    """Verify N+1 Memory Resilience.

    Check that if one single node dies we can still start all the instances it
    was primary for.

    Returns:
      True if some node lacks the memory to absorb a failover, False
      otherwise.

    """
    bad = False

    for node, nodeinfo in node_info.iteritems():
      # This code checks that every node which is now listed as secondary has
      # enough memory to host all instances it is supposed to should a single
      # other node in the cluster fail.
      # FIXME: not ready for failover to an arbitrary node
      # FIXME: does not support file-backed instances
      # WARNING: we currently take into account down instances as well as up
      # ones, considering that even if they're down someone might want to start
      # them even in the event of a node failure.
      for prinode, instances in nodeinfo['sinst-by-pnode'].iteritems():
        needed_mem = 0
        for instance in instances:
          needed_mem += instance_cfg[instance].memory
        if nodeinfo['mfree'] < needed_mem:
          feedback_fn("  - ERROR: not enough memory on node %s to accomodate"
                      " failovers should node %s fail" % (node, prinode))
          bad = True
    return bad

  def CheckPrereq(self):
    """Check prerequisites.

    Transform the list of checks we're going to skip into a set and check that
    all its members are valid.

    """
    self.skip_set = frozenset(self.op.skip_checks)
    if not constants.VERIFY_OPTIONAL_CHECKS.issuperset(self.skip_set):
      raise errors.OpPrereqError("Invalid checks to be skipped specified")

  def BuildHooksEnv(self):
    """Build hooks env.

    Cluster-Verify hooks just run in the post phase and their failure makes
    the output be logged in the verify output and the verification to fail.

    """
    all_nodes = self.cfg.GetNodeList()
    # TODO: populate the environment with useful information for verify hooks
    env = {}
    return env, [], all_nodes

  def Exec(self, feedback_fn):
    """Verify integrity of cluster, performing various tests on nodes.

    Returns:
      True if no problem was found, False otherwise.

    """
    bad = False
    feedback_fn("* Verifying global settings")
    for msg in self.cfg.VerifyConfig():
      feedback_fn("  - ERROR: %s" % msg)

    vg_name = self.cfg.GetVGName()
    nodelist = utils.NiceSort(self.cfg.GetNodeList())
    nodeinfo = [self.cfg.GetNodeInfo(nname) for nname in nodelist]
    instancelist = utils.NiceSort(self.cfg.GetInstanceList())
    i_non_redundant = [] # Non redundant instances
    node_volume = {}
    node_instance = {}
    node_info = {}
    instance_cfg = {}

    # FIXME: verify OS list
    # do local checksums
    file_names = list(self.sstore.GetFileList())
    file_names.append(constants.SSL_CERT_FILE)
    file_names.append(constants.CLUSTER_CONF_FILE)
    local_checksums = utils.FingerprintFiles(file_names)

    # gather all remote data in bulk before the per-node loop
    feedback_fn("* Gathering data (%d nodes)" % len(nodelist))
    all_volumeinfo = rpc.call_volume_list(nodelist, vg_name)
    all_instanceinfo = rpc.call_instance_list(nodelist)
    all_vglist = rpc.call_vg_list(nodelist)
    node_verify_param = {
      'filelist': file_names,
      'nodelist': nodelist,
      'hypervisor': None,
      'node-net-test': [(node.name, node.primary_ip, node.secondary_ip)
                        for node in nodeinfo]
      }
    all_nvinfo = rpc.call_node_verify(nodelist, node_verify_param)
    all_rversion = rpc.call_version(nodelist)
    all_ninfo = rpc.call_node_info(nodelist, self.cfg.GetVGName())

    for node in nodelist:
      feedback_fn("* Verifying node %s" % node)
      result = self._VerifyNode(node, file_names, local_checksums,
                                all_vglist[node], all_nvinfo[node],
                                all_rversion[node], feedback_fn)
      bad = bad or result

      # node_volume
      volumeinfo = all_volumeinfo[node]

      if isinstance(volumeinfo, basestring):
        # a plain string is an LVM error report from the node
        feedback_fn("  - ERROR: LVM problem on node %s: %s" %
                    (node, volumeinfo[-400:].encode('string_escape')))
        bad = True
        node_volume[node] = {}
      elif not isinstance(volumeinfo, dict):
        feedback_fn("  - ERROR: connection to %s failed" % (node,))
        bad = True
        continue
      else:
        node_volume[node] = volumeinfo

      # node_instance
      nodeinstance = all_instanceinfo[node]
      if type(nodeinstance) != list:
        feedback_fn("  - ERROR: connection to %s failed" % (node,))
        bad = True
        continue

      node_instance[node] = nodeinstance

      # node_info
      # NOTE(review): this rebinds the outer 'nodeinfo' list from above;
      # the list is no longer needed at this point
      nodeinfo = all_ninfo[node]
      if not isinstance(nodeinfo, dict):
        feedback_fn("  - ERROR: connection to %s failed" % (node,))
        bad = True
        continue

      try:
        node_info[node] = {
          "mfree": int(nodeinfo['memory_free']),
          "dfree": int(nodeinfo['vg_free']),
          "pinst": [],
          "sinst": [],
          # dictionary holding all instances this node is secondary for,
          # grouped by their primary node. Each key is a cluster node, and each
          # value is a list of instances which have the key as primary and the
          # current node as secondary.  this is handy to calculate N+1 memory
          # availability if you can only failover from a primary to its
          # secondary.
          "sinst-by-pnode": {},
        }
      except ValueError:
        feedback_fn("  - ERROR: invalid value returned from node %s" % (node,))
        bad = True
        continue

    node_vol_should = {}

    for instance in instancelist:
      feedback_fn("* Verifying instance %s" % instance)
      inst_config = self.cfg.GetInstanceInfo(instance)
      result =  self._VerifyInstance(instance, inst_config, node_volume,
                                     node_instance, feedback_fn)
      bad = bad or result

      inst_config.MapLVsByNode(node_vol_should)

      instance_cfg[instance] = inst_config

      pnode = inst_config.primary_node
      if pnode in node_info:
        node_info[pnode]['pinst'].append(instance)
      else:
        feedback_fn("  - ERROR: instance %s, connection to primary node"
                    " %s failed" % (instance, pnode))
        bad = True

      # If the instance is non-redundant we cannot survive losing its primary
      # node, so we are not N+1 compliant. On the other hand we have no disk
      # templates with more than one secondary so that situation is not well
      # supported either.
      # FIXME: does not support file-backed instances
      if len(inst_config.secondary_nodes) == 0:
        i_non_redundant.append(instance)
      elif len(inst_config.secondary_nodes) > 1:
        feedback_fn("  - WARNING: multiple secondaries for instance %s"
                    % instance)

      for snode in inst_config.secondary_nodes:
        if snode in node_info:
          node_info[snode]['sinst'].append(instance)
          if pnode not in node_info[snode]['sinst-by-pnode']:
            node_info[snode]['sinst-by-pnode'][pnode] = []
          node_info[snode]['sinst-by-pnode'][pnode].append(instance)
        else:
          feedback_fn("  - ERROR: instance %s, connection to secondary node"
                      " %s failed" % (instance, snode))

    feedback_fn("* Verifying orphan volumes")
    result = self._VerifyOrphanVolumes(node_vol_should, node_volume,
                                       feedback_fn)
    bad = bad or result

    feedback_fn("* Verifying remaining instances")
    result = self._VerifyOrphanInstances(instancelist, node_instance,
                                         feedback_fn)
    bad = bad or result

    if constants.VERIFY_NPLUSONE_MEM not in self.skip_set:
      feedback_fn("* Verifying N+1 Memory redundancy")
      result = self._VerifyNPlusOneMemory(node_info, instance_cfg, feedback_fn)
      bad = bad or result

    feedback_fn("* Other Notes")
    if i_non_redundant:
      feedback_fn("  - NOTICE: %d non-redundant instance(s) found."
                  % len(i_non_redundant))

    return not bad

  def HooksCallBack(self, phase, hooks_results, feedback_fn, lu_result):
    """Analyze the post-hooks' result, handle it, and send some
    nicely-formatted feedback back to the user.

    Args:
      phase: the hooks phase that has just been run
      hooks_results: the results of the multi-node hooks rpc call
      feedback_fn: function to send feedback back to the caller
      lu_result: previous Exec result

    Returns:
      The (possibly updated) lu_result; set to 1 when a hook failed.

    """
    # We only really run POST phase hooks, and are only interested in
    # their results
    if phase == constants.HOOKS_PHASE_POST:
      # Used to change hooks' output to proper indentation
      indent_re = re.compile('^', re.M)
      feedback_fn("* Hooks Results")
      if not hooks_results:
        feedback_fn("  - ERROR: general communication failure")
        lu_result = 1
      else:
        for node_name in hooks_results:
          show_node_header = True
          res = hooks_results[node_name]
          if res is False or not isinstance(res, list):
            feedback_fn("    Communication failure")
            lu_result = 1
            continue
          for script, hkr, output in res:
            if hkr == constants.HKR_FAIL:
              # The node header is only shown once, if there are
              # failing hooks on that node
              if show_node_header:
                feedback_fn("  Node %s:" % node_name)
                show_node_header = False
              feedback_fn("    ERROR: Script %s failed, output:" % script)
              output = indent_re.sub('      ', output)
              feedback_fn("%s" % output)
              lu_result = 1

      return lu_result
900 d8fff41c Guido Trotter
901 a8083063 Iustin Pop
902 2c95a8d4 Iustin Pop
class LUVerifyDisks(NoHooksLU):
  """Verifies the cluster disks status.

  """
  _OP_REQP = []

  def CheckPrereq(self):
    """Check prerequisites.

    This has no prerequisites.

    """
    pass

  def Exec(self, feedback_fn):
    """Verify integrity of cluster disks.

    Returns:
      A four-element tuple (also bound to res_nodes, res_nlvm,
      res_instances, res_missing):
        - res_nodes: list of nodes which could not be contacted or
          returned invalid data
        - res_nlvm: dict node name -> LVM error string for nodes where
          enumerating the LVs failed
        - res_instances: list of instance names having at least one
          inactive logical volume
        - res_missing: dict instance name -> list of (node, volume)
          pairs for volumes which are missing entirely

    """
    result = res_nodes, res_nlvm, res_instances, res_missing = [], {}, [], {}

    vg_name = self.cfg.GetVGName()
    nodes = utils.NiceSort(self.cfg.GetNodeList())
    instances = [self.cfg.GetInstanceInfo(name)
                 for name in self.cfg.GetInstanceList()]

    # build the map of volumes which should exist: only up instances
    # with network-mirrored disk templates are considered
    nv_dict = {}
    for inst in instances:
      inst_lvs = {}
      if (inst.status != "up" or
          inst.disk_template not in constants.DTS_NET_MIRROR):
        continue
      inst.MapLVsByNode(inst_lvs)
      # transform { iname: {node: [vol,],},} to {(node, vol): iname}
      for node, vol_list in inst_lvs.iteritems():
        for vol in vol_list:
          nv_dict[(node, vol)] = inst

    if not nv_dict:
      # nothing to check, report an all-clean result
      return result

    node_lvs = rpc.call_volume_list(nodes, vg_name)

    for node in nodes:
      # node_volume
      lvs = node_lvs[node]

      if isinstance(lvs, basestring):
        # a plain string is an LVM error report from the node; record it
        # and skip the per-volume checks for this node (bugfix: the old
        # code fell through and crashed calling iteritems() on a string)
        logger.Info("error enumerating LVs on node %s: %s" % (node, lvs))
        res_nlvm[node] = lvs
        continue
      elif not isinstance(lvs, dict):
        logger.Info("connection to node %s failed or invalid data returned" %
                    (node,))
        res_nodes.append(node)
        continue

      # pop each volume we see; whatever is left in nv_dict afterwards
      # is missing on the nodes
      for lv_name, (_, lv_inactive, lv_online) in lvs.iteritems():
        inst = nv_dict.pop((node, lv_name), None)
        if (not lv_online and inst is not None
            and inst.name not in res_instances):
          res_instances.append(inst.name)

    # any leftover items in nv_dict are missing LVs, let's arrange the
    # data better
    for key, inst in nv_dict.iteritems():
      if inst.name not in res_missing:
        res_missing[inst.name] = []
      res_missing[inst.name].append(key)

    return result
972 2c95a8d4 Iustin Pop
973 2c95a8d4 Iustin Pop
974 07bd8a51 Iustin Pop
class LURenameCluster(LogicalUnit):
  """Rename the cluster.

  Updates the cluster name and master IP in the ssconf store and
  distributes the change to all nodes; the master role is stopped for
  the duration of the change and restarted afterwards.

  """
  HPATH = "cluster-rename"
  HTYPE = constants.HTYPE_CLUSTER
  _OP_REQP = ["name"]
  # presumably requests a writable ssconf store for this LU -- TODO confirm
  REQ_WSSTORE = True

  def BuildHooksEnv(self):
    """Build hooks env.

    Exposes the current cluster name and the requested new name; hooks
    run only on the master node.

    """
    env = {
      "OP_TARGET": self.sstore.GetClusterName(),
      "NEW_NAME": self.op.name,
      }
    mn = self.sstore.GetMasterNode()
    return env, [mn], [mn]

  def CheckPrereq(self):
    """Verify that the passed name is a valid one.

    Resolves the new name, requires that either the name or the IP
    actually changes, and refuses a new IP that is already reachable on
    the network.

    """
    hostname = utils.HostInfo(self.op.name)

    new_name = hostname.name
    self.ip = new_ip = hostname.ip
    old_name = self.sstore.GetClusterName()
    old_ip = self.sstore.GetMasterIP()
    if new_name == old_name and new_ip == old_ip:
      raise errors.OpPrereqError("Neither the name nor the IP address of the"
                                 " cluster has changed")
    if new_ip != old_ip:
      # refuse an address that something already answers on
      if utils.TcpPing(new_ip, constants.DEFAULT_NODED_PORT):
        raise errors.OpPrereqError("The given cluster IP address (%s) is"
                                   " reachable on the network. Aborting." %
                                   new_ip)

    # store the fully-resolved name for Exec
    self.op.name = new_name

  def Exec(self, feedback_fn):
    """Rename the cluster.

    Stops the master role, rewrites the ssconf keys, pushes the updated
    files to all other nodes and (in all cases) restarts the master
    role.

    """
    clustername = self.op.name
    ip = self.ip
    ss = self.sstore

    # shutdown the master IP
    master = ss.GetMasterNode()
    if not rpc.call_node_stop_master(master, False):
      raise errors.OpExecError("Could not disable the master role")

    try:
      # modify the sstore
      ss.SetKey(ss.SS_MASTER_IP, ip)
      ss.SetKey(ss.SS_CLUSTER_NAME, clustername)

      # Distribute updated ss config to all nodes
      myself = self.cfg.GetNodeInfo(master)
      dist_nodes = self.cfg.GetNodeList()
      if myself.name in dist_nodes:
        dist_nodes.remove(myself.name)

      logger.Debug("Copying updated ssconf data to all nodes")
      for keyname in [ss.SS_CLUSTER_NAME, ss.SS_MASTER_IP]:
        fname = ss.KeyToFilename(keyname)
        result = rpc.call_upload_file(dist_nodes, fname)
        for to_node in dist_nodes:
          if not result[to_node]:
            # distribution failures are logged but do not abort the rename
            logger.Error("copy of file %s to node %s failed" %
                         (fname, to_node))
    finally:
      # always try to bring the master role back up
      if not rpc.call_node_start_master(master, False):
        logger.Error("Could not re-enable the master role on the master,"
                     " please restart manually.")
1051 07bd8a51 Iustin Pop
1052 07bd8a51 Iustin Pop
1053 8084f9f6 Manuel Franceschini
def _RecursiveCheckIfLVMBased(disk):
  """Tell whether the given disk, or any disk below it, is LVM-based.

  The children are walked depth-first; a positive answer from any
  descendant short-circuits the search.

  Args:
    disk: ganeti.objects.Disk object

  Returns:
    boolean indicating whether a LD_LV dev_type was found or not

  """
  for child in disk.children or []:
    if _RecursiveCheckIfLVMBased(child):
      return True
  return disk.dev_type == constants.LD_LV
1068 8084f9f6 Manuel Franceschini
1069 8084f9f6 Manuel Franceschini
1070 8084f9f6 Manuel Franceschini
class LUSetClusterParams(LogicalUnit):
  """Change the parameters of the cluster.

  """
  HPATH = "cluster-modify"
  HTYPE = constants.HTYPE_CLUSTER
  _OP_REQP = []

  def BuildHooksEnv(self):
    """Build hooks env.

    The master node is both the pre- and post-hook target.

    """
    master = self.sstore.GetMasterNode()
    env = {
      "OP_TARGET": self.sstore.GetClusterName(),
      "NEW_VG_NAME": self.op.vg_name,
      }
    return env, [master], [master]

  def CheckPrereq(self):
    """Check prerequisites.

    This checks whether the given params don't conflict and
    if the given volume group is valid.

    """
    if not self.op.vg_name:
      # disabling LVM storage is only allowed if no instance currently
      # uses an lvm-based disk
      for iname in self.cfg.GetInstanceList():
        inst = self.cfg.GetInstanceInfo(iname)
        for disk in inst.disks:
          if _RecursiveCheckIfLVMBased(disk):
            raise errors.OpPrereqError("Cannot disable lvm storage while"
                                       " lvm-based instances exist")
    else:
      # a volume group was given: check it exists and is big enough on
      # every node of the cluster
      node_list = self.cfg.GetNodeList()
      vglist = rpc.call_vg_list(node_list)
      for node in node_list:
        vgstatus = utils.CheckVolumeGroupSize(vglist[node], self.op.vg_name,
                                              constants.MIN_VG_SIZE)
        if vgstatus:
          raise errors.OpPrereqError("Error on node '%s': %s" %
                                     (node, vgstatus))

  def Exec(self, feedback_fn):
    """Change the parameters of the cluster.

    """
    if self.op.vg_name == self.cfg.GetVGName():
      feedback_fn("Cluster LVM configuration already in desired"
                  " state, not changing")
    else:
      self.cfg.SetVGName(self.op.vg_name)
1125 8084f9f6 Manuel Franceschini
1126 8084f9f6 Manuel Franceschini
1127 5bfac263 Iustin Pop
def _WaitForSync(cfgw, instance, proc, oneshot=False, unlock=False):
  """Sleep and poll for an instance's disk to sync.

  Args:
    cfgw: configuration object, used to set the disks' physical IDs
    instance: the instance whose disks are polled (on its primary node)
    proc: object providing LogInfo/LogWarning for user feedback
    oneshot: if True, poll and report only once instead of waiting
      until the sync finishes
    unlock: unused here; NOTE(review): nothing in this function reads
      it -- confirm whether callers still need the parameter

  Returns:
    True if no disk ended up degraded-without-resync, False otherwise

  Raises:
    errors.RemoteError: if the node fails to answer 10 times in a row

  """
  if not instance.disks:
    return True

  if not oneshot:
    proc.LogInfo("Waiting for instance %s to sync disks." % instance.name)

  node = instance.primary_node

  # point the disk objects at their physical devices on the primary node
  for dev in instance.disks:
    cfgw.SetDiskID(dev, node)

  retries = 0
  while True:
    max_time = 0
    done = True
    cumul_degraded = False
    rstats = rpc.call_blockdev_getmirrorstatus(node, instance.disks)
    if not rstats:
      # empty answer means the node is down or unreachable; retry a
      # bounded number of times before giving up
      proc.LogWarning("Can't get any data from node %s" % node)
      retries += 1
      if retries >= 10:
        raise errors.RemoteError("Can't contact node %s for mirror data,"
                                 " aborting." % node)
      time.sleep(6)
      continue
    retries = 0  # a successful answer resets the retry counter
    for i in range(len(rstats)):
      mstat = rstats[i]
      if mstat is None:
        proc.LogWarning("Can't compute data for node %s/%s" %
                        (node, instance.disks[i].iv_name))
        continue
      # we ignore the ldisk (local disk status) element of the tuple
      perc_done, est_time, is_degraded, _ = mstat
      # degraded with no resync running counts as a real failure
      cumul_degraded = cumul_degraded or (is_degraded and perc_done is None)
      if perc_done is not None:
        # a completion percentage means this device is still syncing
        done = False
        if est_time is not None:
          rem_time = "%d estimated seconds remaining" % est_time
          max_time = est_time
        else:
          rem_time = "no time estimate"
        proc.LogInfo("- device %s: %5.2f%% done, %s" %
                     (instance.disks[i].iv_name, perc_done, rem_time))
    if done or oneshot:
      break

    # sleep at most one minute between polls; NOTE(review): if no device
    # reported a time estimate, max_time stays 0 and this loop polls
    # without sleeping -- confirm this is intended
    time.sleep(min(60, max_time))

  if done:
    proc.LogInfo("Instance %s's disks are in sync." % instance.name)
  return not cumul_degraded
1183 a8083063 Iustin Pop
1184 a8083063 Iustin Pop
1185 0834c866 Iustin Pop
def _CheckDiskConsistency(cfgw, dev, node, on_primary, ldisk=False):
1186 a8083063 Iustin Pop
  """Check that mirrors are not degraded.
1187 a8083063 Iustin Pop

1188 0834c866 Iustin Pop
  The ldisk parameter, if True, will change the test from the
1189 0834c866 Iustin Pop
  is_degraded attribute (which represents overall non-ok status for
1190 0834c866 Iustin Pop
  the device(s)) to the ldisk (representing the local storage status).
1191 0834c866 Iustin Pop

1192 a8083063 Iustin Pop
  """
1193 a8083063 Iustin Pop
  cfgw.SetDiskID(dev, node)
1194 0834c866 Iustin Pop
  if ldisk:
1195 0834c866 Iustin Pop
    idx = 6
1196 0834c866 Iustin Pop
  else:
1197 0834c866 Iustin Pop
    idx = 5
1198 a8083063 Iustin Pop
1199 a8083063 Iustin Pop
  result = True
1200 a8083063 Iustin Pop
  if on_primary or dev.AssembleOnSecondary():
1201 a8083063 Iustin Pop
    rstats = rpc.call_blockdev_find(node, dev)
1202 a8083063 Iustin Pop
    if not rstats:
1203 aa9d0c32 Guido Trotter
      logger.ToStderr("Node %s: Disk degraded, not found or node down" % node)
1204 a8083063 Iustin Pop
      result = False
1205 a8083063 Iustin Pop
    else:
1206 0834c866 Iustin Pop
      result = result and (not rstats[idx])
1207 a8083063 Iustin Pop
  if dev.children:
1208 a8083063 Iustin Pop
    for child in dev.children:
1209 a8083063 Iustin Pop
      result = result and _CheckDiskConsistency(cfgw, child, node, on_primary)
1210 a8083063 Iustin Pop
1211 a8083063 Iustin Pop
  return result
1212 a8083063 Iustin Pop
1213 a8083063 Iustin Pop
1214 a8083063 Iustin Pop
class LUDiagnoseOS(NoHooksLU):
  """Logical unit for OS diagnose/query.

  """
  _OP_REQP = ["output_fields", "names"]
  REQ_BGL = False

  def ExpandNames(self):
    # filtering by OS name is not implemented
    if self.op.names:
      raise errors.OpPrereqError("Selective OS query not supported")

    self.dynamic_fields = frozenset(["name", "valid", "node_status"])
    _CheckOutputFields(static=[], dynamic=self.dynamic_fields,
                       selected=self.op.output_fields)

    # every node must be inspected, so take all node locks, shared
    self.needed_locks = {locking.LEVEL_NODE: locking.ALL_SET}
    self.share_locks[locking.LEVEL_NODE] = 1

  def CheckPrereq(self):
    """Check prerequisites.

    """

  @staticmethod
  def _DiagnoseByOS(node_list, rlist):
    """Remap a per-node return list into a per-OS, per-node dictionary.

      Args:
        node_list: a list with the names of all nodes
        rlist: a map with node names as keys and OS objects as values

      Returns:
        map: a map with osnames as keys and as value another map, with
             nodes as keys and list of OS objects as values
             e.g. {"debian-etch": {"node1": [<object>,...],
                                   "node2": [<object>,]}
                  }

    """
    remapped = {}
    for node_name, os_list in rlist.iteritems():
      if not os_list:
        continue
      for os_obj in os_list:
        if os_obj.name not in remapped:
          # first sighting of this OS: seed an empty list for every
          # node, so nodes without this OS are still represented
          remapped[os_obj.name] = dict([(nname, []) for nname in node_list])
        remapped[os_obj.name][node_name].append(os_obj)
    return remapped

  def Exec(self, feedback_fn):
    """Compute the list of OSes.

    """
    node_list = self.acquired_locks[locking.LEVEL_NODE]
    node_data = rpc.call_os_diagnose(node_list)
    if node_data == False:
      raise errors.OpExecError("Can't gather the list of OSes")
    per_os = self._DiagnoseByOS(node_list, node_data)
    output = []
    for os_name, os_data in per_os.iteritems():
      row = []
      for field in self.op.output_fields:
        if field == "name":
          val = os_name
        elif field == "valid":
          # valid iff every node has entries and its first entry is
          # truthy; presumably the first entry is authoritative -- verify
          val = utils.all([osl and osl[0] for osl in os_data.values()])
        elif field == "node_status":
          val = {}
          for nname, nos_list in os_data.iteritems():
            val[nname] = [(entry.status, entry.path) for entry in nos_list]
        else:
          raise errors.ParameterError(field)
        row.append(val)
      output.append(row)

    return output
1298 a8083063 Iustin Pop
1299 a8083063 Iustin Pop
1300 a8083063 Iustin Pop
class LURemoveNode(LogicalUnit):
  """Logical unit for removing a node.

  """
  HPATH = "node-remove"
  HTYPE = constants.HTYPE_NODE
  _OP_REQP = ["node_name"]

  def BuildHooksEnv(self):
    """Build hooks env.

    This doesn't run on the target node in the pre phase as a failed
    node would then be impossible to remove.

    """
    env = {
      "OP_TARGET": self.op.node_name,
      "NODE_NAME": self.op.node_name,
      }
    # run the hooks on all nodes except the one being removed
    all_nodes = self.cfg.GetNodeList()
    all_nodes.remove(self.op.node_name)
    return env, all_nodes, all_nodes

  def CheckPrereq(self):
    """Check prerequisites.

    This checks:
     - the node exists in the configuration
     - it does not have primary or secondary instances
     - it's not the master

    Any errors are signalled by raising errors.OpPrereqError.

    """
    node = self.cfg.GetNodeInfo(self.cfg.ExpandNodeName(self.op.node_name))
    if node is None:
      # FIX: use the call form of raise, consistent with every other
      # raise in this module (the old two-argument statement form was
      # used here and is deprecated/invalid in newer Python)
      raise errors.OpPrereqError("Node '%s' is unknown." % self.op.node_name)

    instance_list = self.cfg.GetInstanceList()

    # the master node cannot be removed; it must be failed over first
    masternode = self.sstore.GetMasterNode()
    if node.name == masternode:
      raise errors.OpPrereqError("Node is the master node,"
                                 " you need to failover first.")

    # refuse removal while any instance still uses the node, either as
    # its primary or as a secondary
    for instance_name in instance_list:
      instance = self.cfg.GetInstanceInfo(instance_name)
      if node.name == instance.primary_node:
        raise errors.OpPrereqError("Instance %s still running on the node,"
                                   " please remove first." % instance_name)
      if node.name in instance.secondary_nodes:
        raise errors.OpPrereqError("Instance %s has node as a secondary,"
                                   " please remove first." % instance_name)
    # canonicalize the (possibly abbreviated) node name for Exec
    self.op.node_name = node.name
    self.node = node

  def Exec(self, feedback_fn):
    """Removes the node from the cluster.

    """
    node = self.node
    logger.Info("stopping the node daemon and removing configs from node %s" %
                node.name)

    # drop the node from the cluster context/configuration first, then
    # tell the node itself to leave the cluster
    self.context.RemoveNode(node.name)

    rpc.call_node_leave_cluster(node.name)
1367 c8a0948f Michael Hanselmann
1368 a8083063 Iustin Pop
1369 a8083063 Iustin Pop
class LUQueryNodes(NoHooksLU):
  """Logical unit for querying nodes.

  Returns a table (one row per queried node) holding the columns named
  in the ``output_fields`` opcode parameter.

  """
  _OP_REQP = ["output_fields", "names"]
  REQ_BGL = False

  def ExpandNames(self):
    """Validate the requested fields and compute the node locks needed.

    """
    # fields that need a live RPC call to each node (see Exec)
    self.dynamic_fields = frozenset([
      "dtotal", "dfree",
      "mtotal", "mnode", "mfree",
      "bootid",
      "ctotal",
      ])

    _CheckOutputFields(static=["name", "pinst_cnt", "sinst_cnt",
                               "pinst_list", "sinst_list",
                               "pip", "sip", "tags"],
                       dynamic=self.dynamic_fields,
                       selected=self.op.output_fields)

    self.needed_locks = {}
    self.share_locks[locking.LEVEL_NODE] = 1
    # TODO: we could lock nodes only if the user asked for dynamic fields. For
    # that we need atomic ways to get info for a group of nodes from the
    # config, though.
    if not self.op.names:
      self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
    else:
      self.needed_locks[locking.LEVEL_NODE] = \
        _GetWantedNodes(self, self.op.names)

  def CheckPrereq(self):
    """Check prerequisites.

    """
    # This of course is valid only if we locked the nodes
    self.wanted = self.acquired_locks[locking.LEVEL_NODE]

  def Exec(self, feedback_fn):
    """Computes the list of nodes and their attributes.

    Returns a list of rows, one per node, each holding the requested
    output fields in order.

    """
    nodenames = self.wanted
    nodelist = [self.cfg.GetNodeInfo(name) for name in nodenames]

    # begin data gathering

    # only issue the per-node RPC if at least one dynamic field is wanted
    if self.dynamic_fields.intersection(self.op.output_fields):
      live_data = {}
      node_data = rpc.call_node_info(nodenames, self.cfg.GetVGName())
      for name in nodenames:
        nodeinfo = node_data.get(name, None)
        if nodeinfo:
          # TryConvert presumably falls back to the raw value when int()
          # fails -- verify in utils
          live_data[name] = {
            "mtotal": utils.TryConvert(int, nodeinfo['memory_total']),
            "mnode": utils.TryConvert(int, nodeinfo['memory_dom0']),
            "mfree": utils.TryConvert(int, nodeinfo['memory_free']),
            "dtotal": utils.TryConvert(int, nodeinfo['vg_size']),
            "dfree": utils.TryConvert(int, nodeinfo['vg_free']),
            "ctotal": utils.TryConvert(int, nodeinfo['cpu_total']),
            "bootid": nodeinfo['bootid'],
            }
        else:
          # node didn't answer: dynamic fields become None below
          live_data[name] = {}
    else:
      # NOTE(review): all keys share one (empty) dict object here; safe
      # only because the values are never mutated below, just read
      live_data = dict.fromkeys(nodenames, {})

    node_to_primary = dict([(name, set()) for name in nodenames])
    node_to_secondary = dict([(name, set()) for name in nodenames])

    # instance relations are only computed when an instance field is wanted
    inst_fields = frozenset(("pinst_cnt", "pinst_list",
                             "sinst_cnt", "sinst_list"))
    if inst_fields & frozenset(self.op.output_fields):
      instancelist = self.cfg.GetInstanceList()

      for instance_name in instancelist:
        inst = self.cfg.GetInstanceInfo(instance_name)
        if inst.primary_node in node_to_primary:
          node_to_primary[inst.primary_node].add(inst.name)
        for secnode in inst.secondary_nodes:
          if secnode in node_to_secondary:
            node_to_secondary[secnode].add(inst.name)

    # end data gathering

    output = []
    for node in nodelist:
      node_output = []
      for field in self.op.output_fields:
        if field == "name":
          val = node.name
        elif field == "pinst_list":
          val = list(node_to_primary[node.name])
        elif field == "sinst_list":
          val = list(node_to_secondary[node.name])
        elif field == "pinst_cnt":
          val = len(node_to_primary[node.name])
        elif field == "sinst_cnt":
          val = len(node_to_secondary[node.name])
        elif field == "pip":
          val = node.primary_ip
        elif field == "sip":
          val = node.secondary_ip
        elif field == "tags":
          val = list(node.GetTags())
        elif field in self.dynamic_fields:
          # missing live data (node down) yields None for dynamic fields
          val = live_data[node.name].get(field, None)
        else:
          raise errors.ParameterError(field)
        node_output.append(val)
      output.append(node_output)

    return output
1483 a8083063 Iustin Pop
1484 a8083063 Iustin Pop
1485 dcb93971 Michael Hanselmann
class LUQueryNodeVolumes(NoHooksLU):
  """Logical unit for getting volumes on node(s).

  """
  _OP_REQP = ["nodes", "output_fields"]
  REQ_BGL = False

  def ExpandNames(self):
    _CheckOutputFields(static=["node"],
                       dynamic=["phys", "vg", "name", "size", "instance"],
                       selected=self.op.output_fields)

    self.needed_locks = {}
    self.share_locks[locking.LEVEL_NODE] = 1
    if self.op.nodes:
      self.needed_locks[locking.LEVEL_NODE] = \
        _GetWantedNodes(self, self.op.nodes)
    else:
      # no explicit node list: query every node in the cluster
      self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the fields required are valid output fields.

    """
    self.nodes = self.acquired_locks[locking.LEVEL_NODE]

  def Exec(self, feedback_fn):
    """Computes the list of nodes and their attributes.

    """
    nodenames = self.nodes
    volumes = rpc.call_node_volumes(nodenames)

    instances = [self.cfg.GetInstanceInfo(iname)
                 for iname in self.cfg.GetInstanceList()]
    lv_by_node = dict([(inst, inst.MapLVsByNode()) for inst in instances])

    def _VolumeOwner(node, lv_name):
      # name of the instance owning lv_name on node, '-' if unowned
      for inst in instances:
        if node in lv_by_node[inst] and lv_name in lv_by_node[inst][node]:
          return inst.name
      return '-'

    output = []
    for node in nodenames:
      # skip nodes that didn't answer or have no volumes
      node_vols = volumes.get(node)
      if not node_vols:
        continue

      # stable presentation order, by physical device
      node_vols = sorted(node_vols, key=lambda vol: vol['dev'])

      for vol in node_vols:
        row = []
        for field in self.op.output_fields:
          if field == "node":
            val = node
          elif field == "phys":
            val = vol['dev']
          elif field == "vg":
            val = vol['vg']
          elif field == "name":
            val = vol['name']
          elif field == "size":
            val = int(float(vol['size']))
          elif field == "instance":
            val = _VolumeOwner(node, vol['name'])
          else:
            raise errors.ParameterError(field)
          row.append(str(val))

        output.append(row)

    return output
1562 dcb93971 Michael Hanselmann
1563 dcb93971 Michael Hanselmann
1564 a8083063 Iustin Pop
class LUAddNode(LogicalUnit):
  """Logical unit for adding node to the cluster.

  Handles both adding a brand-new node and re-adding an already known
  one (selected via self.op.readd).

  """
  HPATH = "node-add"
  HTYPE = constants.HTYPE_NODE
  _OP_REQP = ["node_name"]

  def BuildHooksEnv(self):
    """Build hooks env.

    This will run on all nodes before, and on all nodes + the new node after.

    """
    env = {
      "OP_TARGET": self.op.node_name,
      "NODE_NAME": self.op.node_name,
      # NODE_PIP/NODE_SIP are set on self.op by CheckPrereq
      "NODE_PIP": self.op.primary_ip,
      "NODE_SIP": self.op.secondary_ip,
      }
    nodes_0 = self.cfg.GetNodeList()
    # the post hook also runs on the node being added
    nodes_1 = nodes_0 + [self.op.node_name, ]
    return env, nodes_0, nodes_1

  def CheckPrereq(self):
    """Check prerequisites.

    This checks:
     - the new node is not already in the config
     - it is resolvable
     - its parameters (single/dual homed) matches the cluster

    Any errors are signalled by raising errors.OpPrereqError.

    """
    node_name = self.op.node_name
    cfg = self.cfg

    dns_data = utils.HostInfo(node_name)

    node = dns_data.name
    primary_ip = self.op.primary_ip = dns_data.ip
    # without an explicit secondary IP the node is treated as single-homed
    secondary_ip = getattr(self.op, "secondary_ip", None)
    if secondary_ip is None:
      secondary_ip = primary_ip
    if not utils.IsValidIP(secondary_ip):
      raise errors.OpPrereqError("Invalid secondary IP given")
    self.op.secondary_ip = secondary_ip

    node_list = cfg.GetNodeList()
    if not self.op.readd and node in node_list:
      raise errors.OpPrereqError("Node %s is already in the configuration" %
                                 node)
    elif self.op.readd and node not in node_list:
      raise errors.OpPrereqError("Node %s is not in the configuration" % node)

    for existing_node_name in node_list:
      existing_node = cfg.GetNodeInfo(existing_node_name)

      if self.op.readd and node == existing_node_name:
        # a readded node must keep its previous IP configuration
        if (existing_node.primary_ip != primary_ip or
            existing_node.secondary_ip != secondary_ip):
          raise errors.OpPrereqError("Readded node doesn't have the same IP"
                                     " address configuration as before")
        continue

      # reject IPs already used by any other node
      if (existing_node.primary_ip == primary_ip or
          existing_node.secondary_ip == primary_ip or
          existing_node.primary_ip == secondary_ip or
          existing_node.secondary_ip == secondary_ip):
        raise errors.OpPrereqError("New node ip address(es) conflict with"
                                   " existing node %s" % existing_node.name)

    # check that the type of the node (single versus dual homed) is the
    # same as for the master
    myself = cfg.GetNodeInfo(self.sstore.GetMasterNode())
    master_singlehomed = myself.secondary_ip == myself.primary_ip
    newbie_singlehomed = secondary_ip == primary_ip
    if master_singlehomed != newbie_singlehomed:
      if master_singlehomed:
        raise errors.OpPrereqError("The master has no private ip but the"
                                   " new node has one")
      else:
        raise errors.OpPrereqError("The master has a private ip but the"
                                   " new node doesn't have one")

    # checks reachability
    if not utils.TcpPing(primary_ip, constants.DEFAULT_NODED_PORT):
      raise errors.OpPrereqError("Node not reachable by ping")

    if not newbie_singlehomed:
      # check reachability from my secondary ip to newbie's secondary ip
      if not utils.TcpPing(secondary_ip, constants.DEFAULT_NODED_PORT,
                           source=myself.secondary_ip):
        raise errors.OpPrereqError("Node secondary ip not reachable by TCP"
                                   " based ping to noded port")

    self.new_node = objects.Node(name=node,
                                 primary_ip=primary_ip,
                                 secondary_ip=secondary_ip)

  def Exec(self, feedback_fn):
    """Adds the new node to the cluster.

    """
    new_node = self.new_node
    node = new_node.name

    # check connectivity
    result = rpc.call_version([node])[node]
    if result:
      if constants.PROTOCOL_VERSION == result:
        logger.Info("communication to node %s fine, sw version %s match" %
                    (node, result))
      else:
        raise errors.OpExecError("Version mismatch master version %s,"
                                 " node version %s" %
                                 (constants.PROTOCOL_VERSION, result))
    else:
      raise errors.OpExecError("Cannot get version from the new node")

    # setup ssh on node
    logger.Info("copy ssh key to node %s" % node)
    priv_key, pub_key, _ = ssh.GetUserFiles(constants.GANETI_RUNAS)
    keyarray = []
    # the host ssh keys plus the cluster ssh user's key pair
    keyfiles = [constants.SSH_HOST_DSA_PRIV, constants.SSH_HOST_DSA_PUB,
                constants.SSH_HOST_RSA_PRIV, constants.SSH_HOST_RSA_PUB,
                priv_key, pub_key]

    for i in keyfiles:
      f = open(i, 'r')
      try:
        keyarray.append(f.read())
      finally:
        f.close()

    result = rpc.call_node_add(node, keyarray[0], keyarray[1], keyarray[2],
                               keyarray[3], keyarray[4], keyarray[5])

    if not result:
      raise errors.OpExecError("Cannot transfer ssh keys to the new node")

    # Add node to our /etc/hosts, and add key to known_hosts
    utils.AddHostToEtcHosts(new_node.name)

    if new_node.secondary_ip != new_node.primary_ip:
      # ask the new node itself to confirm it owns the secondary IP
      if not rpc.call_node_tcp_ping(new_node.name,
                                    constants.LOCALHOST_IP_ADDRESS,
                                    new_node.secondary_ip,
                                    constants.DEFAULT_NODED_PORT,
                                    10, False):
        raise errors.OpExecError("Node claims it doesn't have the secondary ip"
                                 " you gave (%s). Please fix and re-run this"
                                 " command." % new_node.secondary_ip)

    # have the master re-verify ssh/hostname setup towards the new node
    node_verify_list = [self.sstore.GetMasterNode()]
    node_verify_param = {
      'nodelist': [node],
      # TODO: do a node-net-test as well?
    }

    result = rpc.call_node_verify(node_verify_list, node_verify_param)
    for verifier in node_verify_list:
      if not result[verifier]:
        raise errors.OpExecError("Cannot communicate with %s's node daemon"
                                 " for remote verification" % verifier)
      if result[verifier]['nodelist']:
        for failed in result[verifier]['nodelist']:
          feedback_fn("ssh/hostname verification failed %s -> %s" %
                      (verifier, result[verifier]['nodelist'][failed]))
        raise errors.OpExecError("ssh/hostname verification failed.")

    # Distribute updated /etc/hosts and known_hosts to all nodes,
    # including the node just added
    myself = self.cfg.GetNodeInfo(self.sstore.GetMasterNode())
    dist_nodes = self.cfg.GetNodeList()
    if not self.op.readd:
      dist_nodes.append(node)
    if myself.name in dist_nodes:
      dist_nodes.remove(myself.name)

    logger.Debug("Copying hosts and known_hosts to all nodes")
    for fname in (constants.ETC_HOSTS, constants.SSH_KNOWN_HOSTS_FILE):
      result = rpc.call_upload_file(dist_nodes, fname)
      for to_node in dist_nodes:
        if not result[to_node]:
          # best-effort distribution: log the failure and continue
          logger.Error("copy of file %s to node %s failed" %
                       (fname, to_node))

    # copy the ssconf file list (and the VNC password file for Xen HVM)
    # to the new node only
    to_copy = self.sstore.GetFileList()
    if self.sstore.GetHypervisorType() == constants.HT_XEN_HVM31:
      to_copy.append(constants.VNC_PASSWORD_FILE)
    for fname in to_copy:
      result = rpc.call_upload_file([node], fname)
      if not result[node]:
        logger.Error("could not copy file %s to node %s" % (fname, node))

    if self.op.readd:
      self.context.ReaddNode(new_node)
    else:
      self.context.AddNode(new_node)
1765 a8083063 Iustin Pop
1766 a8083063 Iustin Pop
1767 a8083063 Iustin Pop
class LUQueryClusterInfo(NoHooksLU):
  """Return basic information about the cluster configuration.

  """
  _OP_REQP = []
  REQ_MASTER = False
  REQ_BGL = False

  def ExpandNames(self):
    # this LU touches no other objects, so no locks are required
    self.needed_locks = {}

  def CheckPrereq(self):
    """No prerequisites to check for this LU.

    """
    pass

  def Exec(self, feedback_fn):
    """Assemble and return the cluster information dictionary.

    """
    sstore = self.sstore
    result = {}
    result["name"] = sstore.GetClusterName()
    result["software_version"] = constants.RELEASE_VERSION
    result["protocol_version"] = constants.PROTOCOL_VERSION
    result["config_version"] = constants.CONFIG_VERSION
    result["os_api_version"] = constants.OS_API_VERSION
    result["export_version"] = constants.EXPORT_VERSION
    result["master"] = sstore.GetMasterNode()
    result["architecture"] = (platform.architecture()[0], platform.machine())
    result["hypervisor_type"] = sstore.GetHypervisorType()
    return result
1801 a8083063 Iustin Pop
1802 a8083063 Iustin Pop
1803 a8083063 Iustin Pop
class LUDumpClusterConfig(NoHooksLU):
  """Return a text-representation of the cluster-config.

  """
  _OP_REQP = []
  REQ_BGL = False

  def ExpandNames(self):
    # nothing else is read or written, so no locks are needed
    self.needed_locks = {}

  def CheckPrereq(self):
    """Nothing to check for this LU.

    """
    pass

  def Exec(self, feedback_fn):
    """Return the serialized form of the cluster configuration.

    """
    return self.cfg.DumpConfig()
1824 a8083063 Iustin Pop
1825 a8083063 Iustin Pop
1826 a8083063 Iustin Pop
class LUActivateInstanceDisks(NoHooksLU):
  """Bring up an instance's disks.

  """
  _OP_REQP = ["instance_name"]

  def CheckPrereq(self):
    """Verify that the target instance exists in the cluster.

    """
    full_name = self.cfg.ExpandInstanceName(self.op.instance_name)
    inst = self.cfg.GetInstanceInfo(full_name)
    if inst is None:
      raise errors.OpPrereqError("Instance '%s' not known" %
                                 self.op.instance_name)
    self.instance = inst

  def Exec(self, feedback_fn):
    """Activate the disks.

    """
    ok, info = _AssembleInstanceDisks(self.instance, self.cfg)
    if not ok:
      raise errors.OpExecError("Cannot activate block devices")
    # return the node-device/instance-device mapping to the caller
    return info
1855 a8083063 Iustin Pop
1856 a8083063 Iustin Pop
1857 a8083063 Iustin Pop
def _AssembleInstanceDisks(instance, cfg, ignore_secondaries=False):
  """Prepare the block devices for an instance.

  This sets up the block devices on all nodes.

  Args:
    instance: a ganeti.objects.Instance object
    cfg: the cluster ConfigWriter, used to set disk IDs per node
    ignore_secondaries: if true, errors on secondary nodes won't result
                        in an error return from the function

  Returns:
    a tuple (disks_ok, device_info): disks_ok is False if any assembly
    failed (subject to ignore_secondaries); device_info is a list of
    (primary_node, iv_name, assemble_result) tuples, one per disk

  """
  device_info = []
  disks_ok = True
  iname = instance.name
  # With the two passes mechanism we try to reduce the window of
  # opportunity for the race condition of switching DRBD to primary
  # before handshaking occurred, but we do not eliminate it

  # The proper fix would be to wait (with some limits) until the
  # connection has been made and drbd transitions from WFConnection
  # into any other network-connected state (Connected, SyncTarget,
  # SyncSource, etc.)

  # 1st pass, assemble on all nodes in secondary mode
  for inst_disk in instance.disks:
    for node, node_disk in inst_disk.ComputeNodeTree(instance.primary_node):
      cfg.SetDiskID(node_disk, node)
      result = rpc.call_blockdev_assemble(node, node_disk, iname, False)
      if not result:
        logger.Error("could not prepare block device %s on node %s"
                     " (is_primary=False, pass=1)" % (inst_disk.iv_name, node))
        # secondary-node failures are only fatal when not explicitly ignored
        if not ignore_secondaries:
          disks_ok = False

  # FIXME: race condition on drbd migration to primary

  # 2nd pass, do only the primary node
  for inst_disk in instance.disks:
    for node, node_disk in inst_disk.ComputeNodeTree(instance.primary_node):
      if node != instance.primary_node:
        continue
      cfg.SetDiskID(node_disk, node)
      result = rpc.call_blockdev_assemble(node, node_disk, iname, True)
      if not result:
        logger.Error("could not prepare block device %s on node %s"
                     " (is_primary=True, pass=2)" % (inst_disk.iv_name, node))
        disks_ok = False
    # NOTE(review): 'result' here is the value left over from the last
    # inner-loop iteration (the primary node's assemble result)
    device_info.append((instance.primary_node, inst_disk.iv_name, result))

  # leave the disks configured for the primary node
  # this is a workaround that would be fixed better by
  # improving the logical/physical id handling
  for disk in instance.disks:
    cfg.SetDiskID(disk, instance.primary_node)

  return disks_ok, device_info
1917 a8083063 Iustin Pop
1918 a8083063 Iustin Pop
1919 fe7b0351 Michael Hanselmann
def _StartInstanceDisks(cfg, instance, force):
  """Assemble the disks of an instance, cleaning up on failure.

  If assembly fails, the already-assembled devices are shut down again
  and errors.OpExecError is raised.

  """
  assembled, _ = _AssembleInstanceDisks(instance, cfg,
                                        ignore_secondaries=force)
  if assembled:
    return
  # roll back whatever was brought up before bailing out
  _ShutdownInstanceDisks(instance, cfg)
  if force is not None and not force:
    logger.Error("If the message above refers to a secondary node,"
                 " you can retry the operation using '--force'.")
  raise errors.OpExecError("Disk consistency error")
1931 fe7b0351 Michael Hanselmann
1932 fe7b0351 Michael Hanselmann
1933 a8083063 Iustin Pop
class LUDeactivateInstanceDisks(NoHooksLU):
  """Shutdown an instance's disks.

  """
  _OP_REQP = ["instance_name"]

  def CheckPrereq(self):
    """Verify that the target instance exists in the cluster.

    """
    inst = self.cfg.GetInstanceInfo(
      self.cfg.ExpandInstanceName(self.op.instance_name))
    if inst is None:
      raise errors.OpPrereqError("Instance '%s' not known" %
                                 self.op.instance_name)
    self.instance = inst

  def Exec(self, feedback_fn):
    """Deactivate the disks.

    """
    inst = self.instance
    running = rpc.call_instance_list([inst.primary_node])[inst.primary_node]
    # an unreachable node yields a non-list result (exact type check on
    # purpose, mirroring the wire format)
    if type(running) is not list:
      raise errors.OpExecError("Can't contact node '%s'" %
                               inst.primary_node)

    if inst.name in running:
      raise errors.OpExecError("Instance is running, can't shutdown"
                               " block devices.")

    _ShutdownInstanceDisks(inst, self.cfg)
1968 a8083063 Iustin Pop
1969 a8083063 Iustin Pop
1970 a8083063 Iustin Pop
def _ShutdownInstanceDisks(instance, cfg, ignore_primary=False):
  """Shutdown block devices of an instance.

  The shutdown is attempted on every node holding a piece of every
  instance disk.  When ignore_primary is true, failures on the primary
  node do not affect the boolean return value.

  Returns True if all relevant shutdowns succeeded, False otherwise.

  """
  success = True
  primary = instance.primary_node
  for disk in instance.disks:
    for node, top_disk in disk.ComputeNodeTree(primary):
      cfg.SetDiskID(top_disk, node)
      if rpc.call_blockdev_shutdown(node, top_disk):
        continue
      logger.Error("could not shutdown block device %s on node %s" %
                   (disk.iv_name, node))
      # only primary-node failures can be ignored, and only on request
      if not (ignore_primary and node == primary):
        success = False
  return success
1989 a8083063 Iustin Pop
1990 a8083063 Iustin Pop
1991 d4f16fd9 Iustin Pop
def _CheckNodeFreeMemory(cfg, node, reason, requested):
  """Ensure a node reports at least `requested` MiB of free memory.

  Queries the node via RPC and raises errors.OpPrereqError when the
  node cannot be contacted, reports no usable memory figure, or has
  less free memory than requested.

  Args:
    - cfg: a ConfigWriter instance
    - node: the node name
    - reason: string to use in the error message
    - requested: the amount of memory in MiB

  """
  info = rpc.call_node_info([node], cfg.GetVGName())
  if not (info and isinstance(info, dict)):
    raise errors.OpPrereqError("Could not contact node %s for resource"
                               " information" % (node,))

  avail = info[node].get('memory_free')
  if not isinstance(avail, int):
    raise errors.OpPrereqError("Can't compute free memory on node %s, result"
                               " was '%s'" % (node, avail))
  if requested > avail:
    raise errors.OpPrereqError("Not enough memory on node %s for %s:"
                               " needed %s MiB, available %s MiB" %
                               (node, reason, requested, avail))
2019 d4f16fd9 Iustin Pop
2020 d4f16fd9 Iustin Pop
2021 a8083063 Iustin Pop
class LUStartupInstance(LogicalUnit):
  """Starts an instance.

  """
  HPATH = "instance-start"
  HTYPE = constants.HTYPE_INSTANCE
  _OP_REQP = ["instance_name", "force"]
  # fine-grained locking is used instead of the big ganeti lock
  REQ_BGL = False

  def ExpandNames(self):
    self._ExpandAndLockInstance()
    # node locks are filled in later via DeclareLocks/_LockInstancesNodes
    self.needed_locks[locking.LEVEL_NODE] = []
    self.recalculate_locks[locking.LEVEL_NODE] = 'replace'

  def DeclareLocks(self, level):
    if level == locking.LEVEL_NODE:
      self._LockInstancesNodes()

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and secondary nodes of the instance.

    """
    env = {
      "FORCE": self.op.force,
      }
    env.update(_BuildInstanceHookEnvByObject(self.instance))
    # hooks run on the master plus all of the instance's nodes
    nl = ([self.sstore.GetMasterNode(), self.instance.primary_node] +
          list(self.instance.secondary_nodes))
    return env, nl, nl

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster.

    """
    self.instance = instance = self.cfg.GetInstanceInfo(self.op.instance_name)
    # the instance was locked in ExpandNames, so it must still exist
    assert self.instance is not None, \
      "Cannot retrieve locked instance %s" % self.op.instance_name

    # check bridges existence
    _CheckInstanceBridgesExist(instance)

    # make sure the primary node can accommodate the instance's memory
    _CheckNodeFreeMemory(self.cfg, instance.primary_node,
                         "starting instance %s" % instance.name,
                         instance.memory)

  def Exec(self, feedback_fn):
    """Start the instance.

    """
    instance = self.instance
    force = self.op.force
    extra_args = getattr(self.op, "extra_args", "")

    # record the new desired state in the config before acting on it
    self.cfg.MarkInstanceUp(instance.name)

    node_current = instance.primary_node

    _StartInstanceDisks(self.cfg, instance, force)

    if not rpc.call_instance_start(node_current, instance, extra_args):
      # roll back the disk activation on failure
      _ShutdownInstanceDisks(instance, self.cfg)
      raise errors.OpExecError("Could not start instance")
2088 a8083063 Iustin Pop
2089 bf6929a2 Alexander Schreiber
class LURebootInstance(LogicalUnit):
  """Reboot an instance.

  Depending on the requested reboot type, either the hypervisor is
  asked to reboot the instance in place (soft/hard), or a complete
  shutdown/startup cycle, including the disks, is performed (full).

  """
  HPATH = "instance-reboot"
  HTYPE = constants.HTYPE_INSTANCE
  _OP_REQP = ["instance_name", "ignore_secondaries", "reboot_type"]
  REQ_BGL = False

  def ExpandNames(self):
    # validate the reboot type before acquiring any locks
    allowed = [constants.INSTANCE_REBOOT_SOFT,
               constants.INSTANCE_REBOOT_HARD,
               constants.INSTANCE_REBOOT_FULL]
    if self.op.reboot_type not in allowed:
      raise errors.ParameterError("reboot type not in [%s, %s, %s]" %
                                  (constants.INSTANCE_REBOOT_SOFT,
                                   constants.INSTANCE_REBOOT_HARD,
                                   constants.INSTANCE_REBOOT_FULL))
    self._ExpandAndLockInstance()
    self.needed_locks[locking.LEVEL_NODE] = []
    self.recalculate_locks[locking.LEVEL_NODE] = 'replace'

  def DeclareLocks(self, level):
    if level == locking.LEVEL_NODE:
      # FIXME: lock only primary on (not constants.INSTANCE_REBOOT_FULL)
      self._LockInstancesNodes()

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and secondary nodes of the instance.

    """
    env = {
      "IGNORE_SECONDARIES": self.op.ignore_secondaries,
      }
    env.update(_BuildInstanceHookEnvByObject(self.instance))
    nl = [self.sstore.GetMasterNode(), self.instance.primary_node]
    nl.extend(self.instance.secondary_nodes)
    return env, nl, nl

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster and that its
    network bridges exist.

    """
    inst = self.cfg.GetInstanceInfo(self.op.instance_name)
    self.instance = inst
    assert inst is not None, \
      "Cannot retrieve locked instance %s" % self.op.instance_name

    # the instance's network bridges must exist
    _CheckInstanceBridgesExist(inst)

  def Exec(self, feedback_fn):
    """Reboot the instance.

    """
    inst = self.instance
    extra_args = getattr(self.op, "extra_args", "")
    pnode = inst.primary_node
    in_place = (constants.INSTANCE_REBOOT_SOFT,
                constants.INSTANCE_REBOOT_HARD)

    if self.op.reboot_type in in_place:
      # soft/hard reboot: a single RPC to the primary node
      if not rpc.call_instance_reboot(pnode, inst,
                                      self.op.reboot_type, extra_args):
        raise errors.OpExecError("Could not reboot instance")
    else:
      # full reboot: stop the instance, cycle its disks, start it again
      if not rpc.call_instance_shutdown(pnode, inst):
        raise errors.OpExecError("could not shutdown instance for full reboot")
      _ShutdownInstanceDisks(inst, self.cfg)
      _StartInstanceDisks(self.cfg, inst, self.op.ignore_secondaries)
      if not rpc.call_instance_start(pnode, inst, extra_args):
        _ShutdownInstanceDisks(inst, self.cfg)
        raise errors.OpExecError("Could not start instance for full reboot")

    self.cfg.MarkInstanceUp(inst.name)
2168 bf6929a2 Alexander Schreiber
2169 bf6929a2 Alexander Schreiber
2170 a8083063 Iustin Pop
class LUShutdownInstance(LogicalUnit):
  """Shutdown an instance.

  Marks the instance as administratively down, stops it on its primary
  node and deactivates its disks.

  """
  HPATH = "instance-stop"
  HTYPE = constants.HTYPE_INSTANCE
  _OP_REQP = ["instance_name"]
  REQ_BGL = False

  def ExpandNames(self):
    self._ExpandAndLockInstance()
    self.needed_locks[locking.LEVEL_NODE] = []
    self.recalculate_locks[locking.LEVEL_NODE] = 'replace'

  def DeclareLocks(self, level):
    if level == locking.LEVEL_NODE:
      self._LockInstancesNodes()

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and secondary nodes of the instance.

    """
    env = _BuildInstanceHookEnvByObject(self.instance)
    nl = [self.sstore.GetMasterNode(), self.instance.primary_node]
    nl.extend(self.instance.secondary_nodes)
    return env, nl, nl

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster.

    """
    self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
    assert self.instance is not None, \
      "Cannot retrieve locked instance %s" % self.op.instance_name

  def Exec(self, feedback_fn):
    """Shutdown the instance.

    """
    inst = self.instance
    # record the new administrative state before touching the node
    self.cfg.MarkInstanceDown(inst.name)
    # a failed shutdown RPC is only logged, not fatal: the disk
    # deactivation below happens regardless
    if not rpc.call_instance_shutdown(inst.primary_node, inst):
      logger.Error("could not shutdown instance")

    _ShutdownInstanceDisks(inst, self.cfg)
2220 a8083063 Iustin Pop
2221 a8083063 Iustin Pop
2222 fe7b0351 Michael Hanselmann
class LUReinstallInstance(LogicalUnit):
  """Reinstall an instance.

  Re-runs the OS create scripts on a stopped instance, optionally
  switching it to a different OS first.

  """
  HPATH = "instance-reinstall"
  HTYPE = constants.HTYPE_INSTANCE
  _OP_REQP = ["instance_name"]
  REQ_BGL = False

  def ExpandNames(self):
    self._ExpandAndLockInstance()
    self.needed_locks[locking.LEVEL_NODE] = []
    self.recalculate_locks[locking.LEVEL_NODE] = 'replace'

  def DeclareLocks(self, level):
    if level == locking.LEVEL_NODE:
      self._LockInstancesNodes()

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and secondary nodes of the instance.

    """
    env = _BuildInstanceHookEnvByObject(self.instance)
    nl = ([self.sstore.GetMasterNode(), self.instance.primary_node] +
          list(self.instance.secondary_nodes))
    return env, nl, nl

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster and is not running
    (both per its config status and per a live query to its primary
    node), has disks, and - if an OS change was requested - that the
    new OS is available on the primary node.

    """
    instance = self.cfg.GetInstanceInfo(self.op.instance_name)
    assert instance is not None, \
      "Cannot retrieve locked instance %s" % self.op.instance_name

    if instance.disk_template == constants.DT_DISKLESS:
      raise errors.OpPrereqError("Instance '%s' has no disks" %
                                 self.op.instance_name)
    if instance.status != "down":
      raise errors.OpPrereqError("Instance '%s' is marked to be up" %
                                 self.op.instance_name)
    # double-check against the primary node: the config may be stale
    remote_info = rpc.call_instance_info(instance.primary_node, instance.name)
    if remote_info:
      raise errors.OpPrereqError("Instance '%s' is running on the node %s" %
                                 (self.op.instance_name,
                                  instance.primary_node))

    self.op.os_type = getattr(self.op, "os_type", None)
    if self.op.os_type is not None:
      # OS verification
      pnode = self.cfg.GetNodeInfo(
        self.cfg.ExpandNodeName(instance.primary_node))
      if pnode is None:
        # FIX: this used to format self.op.pnode, which is not a field of
        # the reinstall opcode (only instance_name/os_type are), so the
        # error path itself raised AttributeError; report the node we
        # actually failed to look up instead.
        raise errors.OpPrereqError("Primary node '%s' is unknown" %
                                   instance.primary_node)
      os_obj = rpc.call_os_get(pnode.name, self.op.os_type)
      if not os_obj:
        raise errors.OpPrereqError("OS '%s' not in supported OS list for"
                                   " primary node" % self.op.os_type)

    self.instance = instance

  def Exec(self, feedback_fn):
    """Reinstall the instance.

    """
    inst = self.instance

    if self.op.os_type is not None:
      # record the OS change in the configuration before reinstalling
      feedback_fn("Changing OS to '%s'..." % self.op.os_type)
      inst.os = self.op.os_type
      self.cfg.AddInstance(inst)

    # the OS scripts need the disks activated; make sure they are shut
    # down again whatever the outcome
    _StartInstanceDisks(self.cfg, inst, None)
    try:
      feedback_fn("Running the instance OS create scripts...")
      if not rpc.call_instance_os_add(inst.primary_node, inst, "sda", "sdb"):
        raise errors.OpExecError("Could not install OS for instance %s"
                                 " on node %s" %
                                 (inst.name, inst.primary_node))
    finally:
      _ShutdownInstanceDisks(inst, self.cfg)
2308 fe7b0351 Michael Hanselmann
2309 fe7b0351 Michael Hanselmann
2310 decd5f45 Iustin Pop
class LURenameInstance(LogicalUnit):
  """Rename an instance.

  """
  HPATH = "instance-rename"
  HTYPE = constants.HTYPE_INSTANCE
  _OP_REQP = ["instance_name", "new_name"]
  # NOTE(review): no REQ_BGL/ExpandNames here, so this LU presumably runs
  # under the big ganeti lock; the lock juggling in Exec relies on that

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and secondary nodes of the instance.

    """
    env = _BuildInstanceHookEnvByObject(self.instance)
    env["INSTANCE_NEW_NAME"] = self.op.new_name
    nl = ([self.sstore.GetMasterNode(), self.instance.primary_node] +
          list(self.instance.secondary_nodes))
    return env, nl, nl

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster and is not running,
    and that the new name resolves, is not already taken by another
    instance and (unless ignore_ip is set) that its IP is not in use.

    """
    instance = self.cfg.GetInstanceInfo(
      self.cfg.ExpandInstanceName(self.op.instance_name))
    if instance is None:
      raise errors.OpPrereqError("Instance '%s' not known" %
                                 self.op.instance_name)
    if instance.status != "down":
      raise errors.OpPrereqError("Instance '%s' is marked to be up" %
                                 self.op.instance_name)
    # double-check against the primary node: the config may be stale
    remote_info = rpc.call_instance_info(instance.primary_node, instance.name)
    if remote_info:
      raise errors.OpPrereqError("Instance '%s' is running on the node %s" %
                                 (self.op.instance_name,
                                  instance.primary_node))
    self.instance = instance

    # new name verification
    name_info = utils.HostInfo(self.op.new_name)

    # canonicalize the requested name via the resolver
    self.op.new_name = new_name = name_info.name
    instance_list = self.cfg.GetInstanceList()
    if new_name in instance_list:
      raise errors.OpPrereqError("Instance '%s' is already in the cluster" %
                                 new_name)

    if not getattr(self.op, "ignore_ip", False):
      if utils.TcpPing(name_info.ip, constants.DEFAULT_NODED_PORT):
        raise errors.OpPrereqError("IP %s of instance %s already in use" %
                                   (name_info.ip, new_name))


  def Exec(self, feedback_fn):
    """Rename the instance.

    """
    inst = self.instance
    old_name = inst.name

    # presumably the file storage directory embeds the instance name, so
    # capture the old path before renaming -- TODO confirm
    if inst.disk_template == constants.DT_FILE:
      old_file_storage_dir = os.path.dirname(inst.disks[0].logical_id[1])

    self.cfg.RenameInstance(inst.name, self.op.new_name)
    # Change the instance lock. This is definitely safe while we hold the BGL
    self.context.glm.remove(locking.LEVEL_INSTANCE, inst.name)
    self.context.glm.add(locking.LEVEL_INSTANCE, self.op.new_name)

    # re-read the instance from the configuration after rename
    inst = self.cfg.GetInstanceInfo(self.op.new_name)

    if inst.disk_template == constants.DT_FILE:
      # the storage directory must follow the instance to its new name
      new_file_storage_dir = os.path.dirname(inst.disks[0].logical_id[1])
      result = rpc.call_file_storage_dir_rename(inst.primary_node,
                                                old_file_storage_dir,
                                                new_file_storage_dir)

      if not result:
        raise errors.OpExecError("Could not connect to node '%s' to rename"
                                 " directory '%s' to '%s' (but the instance"
                                 " has been renamed in Ganeti)" % (
                                 inst.primary_node, old_file_storage_dir,
                                 new_file_storage_dir))

      if not result[0]:
        raise errors.OpExecError("Could not rename directory '%s' to '%s'"
                                 " (but the instance has been renamed in"
                                 " Ganeti)" % (old_file_storage_dir,
                                               new_file_storage_dir))

    # run the OS-level rename script with the disks online; a script
    # failure is only logged since the config rename already happened
    _StartInstanceDisks(self.cfg, inst, None)
    try:
      if not rpc.call_instance_run_rename(inst.primary_node, inst, old_name,
                                          "sda", "sdb"):
        msg = ("Could not run OS rename script for instance %s on node %s"
               " (but the instance has been renamed in Ganeti)" %
               (inst.name, inst.primary_node))
        logger.Error(msg)
    finally:
      _ShutdownInstanceDisks(inst, self.cfg)
2413 decd5f45 Iustin Pop
2414 decd5f45 Iustin Pop
2415 a8083063 Iustin Pop
class LURemoveInstance(LogicalUnit):
  """Remove an instance.

  Shuts the instance down, removes its disks and deletes it from the
  configuration; with ignore_failures set, shutdown/disk errors only
  produce warnings.

  """
  HPATH = "instance-remove"
  HTYPE = constants.HTYPE_INSTANCE
  _OP_REQP = ["instance_name", "ignore_failures"]

  def BuildHooksEnv(self):
    """Build hooks env.

    The hook node list contains only the master node.

    """
    env = _BuildInstanceHookEnvByObject(self.instance)
    nl = [self.sstore.GetMasterNode()]
    return env, nl, nl

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster.

    """
    inst = self.cfg.GetInstanceInfo(
      self.cfg.ExpandInstanceName(self.op.instance_name))
    if inst is None:
      raise errors.OpPrereqError("Instance '%s' not known" %
                                 self.op.instance_name)
    self.instance = inst

  def Exec(self, feedback_fn):
    """Remove the instance.

    """
    inst = self.instance
    pnode = inst.primary_node
    logger.Info("shutting down instance %s on node %s" %
                (inst.name, pnode))

    if not rpc.call_instance_shutdown(pnode, inst):
      if not self.op.ignore_failures:
        raise errors.OpExecError("Could not shutdown instance %s on node %s" %
                                 (inst.name, pnode))
      feedback_fn("Warning: can't shutdown instance")

    logger.Info("removing block devices for instance %s" % inst.name)

    if not _RemoveDisks(inst, self.cfg):
      if not self.op.ignore_failures:
        raise errors.OpExecError("Can't remove instance's disks")
      feedback_fn("Warning: can't remove instance's disks")

    logger.Info("removing instance %s out of cluster config" % inst.name)

    self.cfg.RemoveInstance(inst.name)
    # drop the instance's entry from the Ganeti Lock Manager as well
    self.context.glm.remove(locking.LEVEL_INSTANCE, inst.name)
2474 a8083063 Iustin Pop
2475 a8083063 Iustin Pop
2476 a8083063 Iustin Pop
class LUQueryInstances(NoHooksLU):
  """Logical unit for querying instances.

  """
  _OP_REQP = ["output_fields", "names"]
  REQ_BGL = False

  def ExpandNames(self):
    # dynamic fields require a live query to the nodes; all others are
    # answered from the configuration alone
    self.dynamic_fields = frozenset(["oper_state", "oper_ram", "status"])
    # NOTE(review): "auto_balance" is accepted as a static field here but
    # has no handler in Exec below, where it would raise ParameterError
    _CheckOutputFields(static=["name", "os", "pnode", "snodes",
                               "admin_state", "admin_ram",
                               "disk_template", "ip", "mac", "bridge",
                               "sda_size", "sdb_size", "vcpus", "tags",
                               "auto_balance",
                               "network_port", "kernel_path", "initrd_path",
                               "hvm_boot_order", "hvm_acpi", "hvm_pae",
                               "hvm_cdrom_image_path", "hvm_nic_type",
                               "hvm_disk_type", "vnc_bind_address"],
                       dynamic=self.dynamic_fields,
                       selected=self.op.output_fields)

    self.needed_locks = {}
    # a query never modifies anything, so shared locks suffice
    self.share_locks[locking.LEVEL_INSTANCE] = 1
    self.share_locks[locking.LEVEL_NODE] = 1

    # TODO: we could lock instances (and nodes) only if the user asked for
    # dynamic fields. For that we need atomic ways to get info for a group of
    # instances from the config, though.
    if not self.op.names:
      self.needed_locks[locking.LEVEL_INSTANCE] = locking.ALL_SET
    else:
      self.needed_locks[locking.LEVEL_INSTANCE] = \
        _GetWantedInstances(self, self.op.names)

    self.needed_locks[locking.LEVEL_NODE] = []
    self.recalculate_locks[locking.LEVEL_NODE] = 'replace'

  def DeclareLocks(self, level):
    # TODO: locking of nodes could be avoided when not querying them
    if level == locking.LEVEL_NODE:
      self._LockInstancesNodes()

  def CheckPrereq(self):
    """Check prerequisites.

    """
    # This of course is valid only if we locked the instances
    self.wanted = self.acquired_locks[locking.LEVEL_INSTANCE]

  def Exec(self, feedback_fn):
    """Computes the list of nodes and their attributes.

    """
    instance_names = self.wanted
    instance_list = [self.cfg.GetInstanceInfo(iname) for iname
                     in instance_names]

    # begin data gathering

    nodes = frozenset([inst.primary_node for inst in instance_list])

    # bad_nodes collects primary nodes whose info RPC failed; their
    # instances get None / "ERROR_nodedown" for the dynamic fields
    bad_nodes = []
    if self.dynamic_fields.intersection(self.op.output_fields):
      live_data = {}
      node_data = rpc.call_all_instances_info(nodes)
      for name in nodes:
        result = node_data[name]
        if result:
          live_data.update(result)
        elif result == False:
          bad_nodes.append(name)
        # else no instance is alive
    else:
      # no dynamic fields requested: fake an empty live map
      live_data = dict([(name, {}) for name in instance_names])

    # end data gathering

    output = []
    for instance in instance_list:
      iout = []
      for field in self.op.output_fields:
        if field == "name":
          val = instance.name
        elif field == "os":
          val = instance.os
        elif field == "pnode":
          val = instance.primary_node
        elif field == "snodes":
          val = list(instance.secondary_nodes)
        elif field == "admin_state":
          # True when the instance is configured to be running
          val = (instance.status != "down")
        elif field == "oper_state":
          if instance.primary_node in bad_nodes:
            val = None
          else:
            val = bool(live_data.get(instance.name))
        elif field == "status":
          # combined view: configured state vs. actually-running state
          if instance.primary_node in bad_nodes:
            val = "ERROR_nodedown"
          else:
            running = bool(live_data.get(instance.name))
            if running:
              if instance.status != "down":
                val = "running"
              else:
                val = "ERROR_up"
            else:
              if instance.status != "down":
                val = "ERROR_down"
              else:
                val = "ADMIN_down"
        elif field == "admin_ram":
          val = instance.memory
        elif field == "oper_ram":
          if instance.primary_node in bad_nodes:
            val = None
          elif instance.name in live_data:
            val = live_data[instance.name].get("memory", "?")
          else:
            val = "-"
        elif field == "disk_template":
          val = instance.disk_template
        elif field == "ip":
          val = instance.nics[0].ip
        elif field == "bridge":
          val = instance.nics[0].bridge
        elif field == "mac":
          val = instance.nics[0].mac
        elif field == "sda_size" or field == "sdb_size":
          # field[:3] is the disk name ("sda"/"sdb")
          disk = instance.FindDisk(field[:3])
          if disk is None:
            val = None
          else:
            val = disk.size
        elif field == "vcpus":
          val = instance.vcpus
        elif field == "tags":
          val = list(instance.GetTags())
        elif field in ("network_port", "kernel_path", "initrd_path",
                       "hvm_boot_order", "hvm_acpi", "hvm_pae",
                       "hvm_cdrom_image_path", "hvm_nic_type",
                       "hvm_disk_type", "vnc_bind_address"):
          # hypervisor-specific attributes may be missing or unset
          val = getattr(instance, field, None)
          if val is not None:
            pass
          elif field in ("hvm_nic_type", "hvm_disk_type",
                         "kernel_path", "initrd_path"):
            val = "default"
          else:
            val = "-"
        else:
          raise errors.ParameterError(field)
        iout.append(val)
      output.append(iout)

    return output
2632 a8083063 Iustin Pop
2633 a8083063 Iustin Pop
2634 a8083063 Iustin Pop
class LUFailoverInstance(LogicalUnit):
  """Failover an instance.

  The instance is shut down on its primary node and started again on
  its first secondary node; CheckPrereq only allows this for
  network-mirrored disk templates (constants.DTS_NET_MIRROR).

  """
  HPATH = "instance-failover"
  HTYPE = constants.HTYPE_INSTANCE
  # opcode parameters that must be present
  _OP_REQP = ["instance_name", "ignore_consistency"]
  # we declare our own locks in ExpandNames, no big ganeti lock needed
  REQ_BGL = False

  def ExpandNames(self):
    """Expand and lock the instance; node locks are computed later.

    The node lock list is left empty here and recalculated (mode
    'replace') in DeclareLocks, once the instance lock is held.

    """
    self._ExpandAndLockInstance()
    self.needed_locks[locking.LEVEL_NODE] = []
    self.recalculate_locks[locking.LEVEL_NODE] = 'replace'

  def DeclareLocks(self, level):
    """Declare the node-level locks, derived from the locked instance."""
    if level == locking.LEVEL_NODE:
      self._LockInstancesNodes()

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and secondary nodes of the instance.

    """
    env = {
      "IGNORE_CONSISTENCY": self.op.ignore_consistency,
      }
    env.update(_BuildInstanceHookEnvByObject(self.instance))
    # hooks run on the master and all of the instance's secondary nodes
    nl = [self.sstore.GetMasterNode()] + list(self.instance.secondary_nodes)
    return env, nl, nl

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster, uses a
    network-mirrored disk template, that the target (secondary) node
    has enough free memory, and that the instance's bridges exist on
    the target node.

    """
    self.instance = instance = self.cfg.GetInstanceInfo(self.op.instance_name)
    assert self.instance is not None, \
      "Cannot retrieve locked instance %s" % self.op.instance_name

    if instance.disk_template not in constants.DTS_NET_MIRROR:
      raise errors.OpPrereqError("Instance's disk layout is not"
                                 " network mirrored, cannot failover.")

    secondary_nodes = instance.secondary_nodes
    if not secondary_nodes:
      # a mirrored template without a secondary is a config corruption,
      # not a user error
      raise errors.ProgrammerError("no secondary node but using "
                                   "a mirrored disk template")

    # failover always targets the first secondary node
    target_node = secondary_nodes[0]
    # check memory requirements on the secondary node
    _CheckNodeFreeMemory(self.cfg, target_node, "failing over instance %s" %
                         instance.name, instance.memory)

    # check bridge existance
    brlist = [nic.bridge for nic in instance.nics]
    if not rpc.call_bridges_exist(target_node, brlist):
      raise errors.OpPrereqError("One or more target bridges %s does not"
                                 " exist on destination node '%s'" %
                                 (brlist, target_node))

  def Exec(self, feedback_fn):
    """Failover an instance.

    The failover is done by shutting it down on its present node and
    starting it on the secondary.

    """
    instance = self.instance

    source_node = instance.primary_node
    target_node = instance.secondary_nodes[0]

    feedback_fn("* checking disk consistency between source and target")
    for dev in instance.disks:
      # for drbd, these are drbd over lvm
      if not _CheckDiskConsistency(self.cfg, dev, target_node, False):
        # a degraded target disk only aborts the failover for a running
        # instance and when ignore_consistency was not requested
        if instance.status == "up" and not self.op.ignore_consistency:
          raise errors.OpExecError("Disk %s is degraded on target node,"
                                   " aborting failover." % dev.iv_name)

    feedback_fn("* shutting down instance on source node")
    logger.Info("Shutting down instance %s on node %s" %
                (instance.name, source_node))

    if not rpc.call_instance_shutdown(source_node, instance):
      # with ignore_consistency a failed shutdown (e.g. dead source
      # node) is only logged and the failover proceeds
      if self.op.ignore_consistency:
        logger.Error("Could not shutdown instance %s on node %s. Proceeding"
                     " anyway. Please make sure node %s is down"  %
                     (instance.name, source_node, source_node))
      else:
        raise errors.OpExecError("Could not shutdown instance %s on node %s" %
                                 (instance.name, source_node))

    feedback_fn("* deactivating the instance's disks on source node")
    if not _ShutdownInstanceDisks(instance, self.cfg, ignore_primary=True):
      raise errors.OpExecError("Can't shut down the instance's disks.")

    # from here on the secondary becomes the new primary
    instance.primary_node = target_node
    # distribute new instance config to the other nodes
    self.cfg.Update(instance)

    # Only start the instance if it's marked as up
    if instance.status == "up":
      feedback_fn("* activating the instance's disks on target node")
      logger.Info("Starting instance %s on node %s" %
                  (instance.name, target_node))

      disks_ok, dummy = _AssembleInstanceDisks(instance, self.cfg,
                                               ignore_secondaries=True)
      if not disks_ok:
        # roll back disk activation before aborting
        _ShutdownInstanceDisks(instance, self.cfg)
        raise errors.OpExecError("Can't activate the instance's disks")

      feedback_fn("* starting the instance on the target node")
      if not rpc.call_instance_start(target_node, instance, None):
        _ShutdownInstanceDisks(instance, self.cfg)
        raise errors.OpExecError("Could not start instance %s on node %s." %
                                 (instance.name, target_node))
2754 a8083063 Iustin Pop
2755 a8083063 Iustin Pop
2756 3f78eef2 Iustin Pop
def _CreateBlockDevOnPrimary(cfg, node, instance, device, info):
  """Recursively create a block device tree on the primary node.

  On the primary node every device of the tree must exist, so all
  children are created first, then the device itself.

  Returns True on success, False as soon as any creation fails.

  """
  for child in (device.children or []):
    created = _CreateBlockDevOnPrimary(cfg, node, instance, child, info)
    if not created:
      return False

  cfg.SetDiskID(device, node)
  result = rpc.call_blockdev_create(node, device, device.size,
                                    instance.name, True, info)
  if not result:
    return False
  # record the backend-assigned id the first time the device is created
  if device.physical_id is None:
    device.physical_id = result
  return True
2775 a8083063 Iustin Pop
2776 a8083063 Iustin Pop
2777 3f78eef2 Iustin Pop
def _CreateBlockDevOnSecondary(cfg, node, instance, device, force, info):
  """Recursively create a block device tree on a secondary node.

  A device is physically created on the secondary only when its type
  requires it (device.CreateOnSecondary()) or when a parent already
  forced creation; otherwise only the children are visited, carrying
  the same force flag.

  Returns True on success, False as soon as any creation fails.

  """
  if device.CreateOnSecondary():
    force = True

  for child in (device.children or []):
    created = _CreateBlockDevOnSecondary(cfg, node, instance,
                                         child, force, info)
    if not created:
      return False

  if not force:
    # nothing to create for this device itself
    return True

  cfg.SetDiskID(device, node)
  result = rpc.call_blockdev_create(node, device, device.size,
                                    instance.name, False, info)
  if not result:
    return False
  # record the backend-assigned id the first time the device is created
  if device.physical_id is None:
    device.physical_id = result
  return True
2804 a8083063 Iustin Pop
2805 a8083063 Iustin Pop
2806 923b1523 Iustin Pop
def _GenerateUniqueNames(cfg, exts):
2807 923b1523 Iustin Pop
  """Generate a suitable LV name.
2808 923b1523 Iustin Pop

2809 923b1523 Iustin Pop
  This will generate a logical volume name for the given instance.
2810 923b1523 Iustin Pop

2811 923b1523 Iustin Pop
  """
2812 923b1523 Iustin Pop
  results = []
2813 923b1523 Iustin Pop
  for val in exts:
2814 923b1523 Iustin Pop
    new_id = cfg.GenerateUniqueID()
2815 923b1523 Iustin Pop
    results.append("%s%s" % (new_id, val))
2816 923b1523 Iustin Pop
  return results
2817 923b1523 Iustin Pop
2818 923b1523 Iustin Pop
2819 a1f445d3 Iustin Pop
def _GenerateDRBD8Branch(cfg, primary, secondary, size, names, iv_name):
  """Generate one drbd8 device with its data and metadata children.

  names[0] is used for the data LV, names[1] for the metadata LV; the
  metadata volume has a fixed size of 128 MB.  A new DRBD port is
  allocated from the configuration.

  """
  drbd_port = cfg.AllocatePort()
  vg = cfg.GetVGName()
  data_dev = objects.Disk(dev_type=constants.LD_LV, size=size,
                          logical_id=(vg, names[0]))
  meta_dev = objects.Disk(dev_type=constants.LD_LV, size=128,
                          logical_id=(vg, names[1]))
  return objects.Disk(dev_type=constants.LD_DRBD8, size=size,
                      logical_id=(primary, secondary, drbd_port),
                      children=[data_dev, meta_dev],
                      iv_name=iv_name)
2834 a1f445d3 Iustin Pop
2835 7c0d6283 Michael Hanselmann
2836 923b1523 Iustin Pop
def _GenerateDiskTemplate(cfg, template_name,
                          instance_name, primary_node,
                          secondary_nodes, disk_sz, swap_sz,
                          file_storage_dir, file_driver):
  """Generate the entire disk layout for a given template type.

  Returns the list of Disk objects (sda/sdb pair, or empty for
  diskless); raises ProgrammerError for an unknown template or a node
  list that does not match the template's requirements.

  """
  #TODO: compute space requirements

  vgname = cfg.GetVGName()
  if template_name == constants.DT_DISKLESS:
    return []

  if template_name == constants.DT_PLAIN:
    if secondary_nodes:
      raise errors.ProgrammerError("Wrong template configuration")

    lv_names = _GenerateUniqueNames(cfg, [".sda", ".sdb"])
    sda = objects.Disk(dev_type=constants.LD_LV, size=disk_sz,
                       logical_id=(vgname, lv_names[0]),
                       iv_name="sda")
    sdb = objects.Disk(dev_type=constants.LD_LV, size=swap_sz,
                       logical_id=(vgname, lv_names[1]),
                       iv_name="sdb")
    return [sda, sdb]

  if template_name == constants.DT_DRBD8:
    if len(secondary_nodes) != 1:
      raise errors.ProgrammerError("Wrong template configuration")
    remote_node = secondary_nodes[0]
    lv_names = _GenerateUniqueNames(cfg, [".sda_data", ".sda_meta",
                                          ".sdb_data", ".sdb_meta"])
    sda = _GenerateDRBD8Branch(cfg, primary_node, remote_node,
                               disk_sz, lv_names[0:2], "sda")
    sdb = _GenerateDRBD8Branch(cfg, primary_node, remote_node,
                               swap_sz, lv_names[2:4], "sdb")
    return [sda, sdb]

  if template_name == constants.DT_FILE:
    if secondary_nodes:
      raise errors.ProgrammerError("Wrong template configuration")

    sda = objects.Disk(dev_type=constants.LD_FILE, size=disk_sz,
                       iv_name="sda", logical_id=(file_driver,
                       "%s/sda" % file_storage_dir))
    sdb = objects.Disk(dev_type=constants.LD_FILE, size=swap_sz,
                       iv_name="sdb", logical_id=(file_driver,
                       "%s/sdb" % file_storage_dir))
    return [sda, sdb]

  raise errors.ProgrammerError("Invalid disk template '%s'" % template_name)
2885 a8083063 Iustin Pop
2886 a8083063 Iustin Pop
2887 a0c3fea1 Michael Hanselmann
def _GetInstanceInfoText(instance):
2888 3ecf6786 Iustin Pop
  """Compute that text that should be added to the disk's metadata.
2889 3ecf6786 Iustin Pop

2890 3ecf6786 Iustin Pop
  """
2891 a0c3fea1 Michael Hanselmann
  return "originstname+%s" % instance.name
2892 a0c3fea1 Michael Hanselmann
2893 a0c3fea1 Michael Hanselmann
2894 a8083063 Iustin Pop
def _CreateDisks(cfg, instance):
  """Create all disks for an instance.

  This abstracts away some work from AddInstance.  For file-based
  instances the file storage directory is created first; every disk is
  then created on all secondary nodes before the primary node.

  Args:
    instance: the instance object

  Returns:
    True or False showing the success of the creation process

  """
  info = _GetInstanceInfoText(instance)

  if instance.disk_template == constants.DT_FILE:
    file_storage_dir = os.path.dirname(instance.disks[0].logical_id[1])
    result = rpc.call_file_storage_dir_create(instance.primary_node,
                                              file_storage_dir)

    # a false result means the RPC itself failed
    if not result:
      logger.Error("Could not connect to node '%s'" % instance.primary_node)
      return False

    # result[0] is the success flag of the remote directory creation
    if not result[0]:
      logger.Error("failed to create directory '%s'" % file_storage_dir)
      return False

  for disk in instance.disks:
    logger.Info("creating volume %s for instance %s" %
                (disk.iv_name, instance.name))
    #HARDCODE
    for snode in instance.secondary_nodes:
      if not _CreateBlockDevOnSecondary(cfg, snode, instance,
                                        disk, False, info):
        logger.Error("failed to create volume %s (%s) on secondary node %s!" %
                     (disk.iv_name, disk, snode))
        return False
    #HARDCODE
    if not _CreateBlockDevOnPrimary(cfg, instance.primary_node,
                                    instance, disk, info):
      logger.Error("failed to create volume %s on primary!" %
                   disk.iv_name)
      return False

  return True
2939 a8083063 Iustin Pop
2940 a8083063 Iustin Pop
2941 a8083063 Iustin Pop
def _RemoveDisks(instance, cfg):
  """Remove all disks for an instance.

  Companion to `AddInstance()` and `RemoveInstance()`.  Unlike
  `_CreateDisks()`, a failed removal does not abort the loop: the
  remaining devices are still attempted, and the overall failure is
  reported in the return value.  For file-based instances the file
  storage directory is removed as well.

  Args:
    instance: the instance object

  Returns:
    True or False showing the success of the removal process

  """
  logger.Info("removing block devices for instance %s" % instance.name)

  all_ok = True
  for disk in instance.disks:
    for node_name, subdev in disk.ComputeNodeTree(instance.primary_node):
      cfg.SetDiskID(subdev, node_name)
      if not rpc.call_blockdev_remove(node_name, subdev):
        logger.Error("could not remove block device %s on node %s,"
                     " continuing anyway" %
                     (disk.iv_name, node_name))
        all_ok = False

  if instance.disk_template == constants.DT_FILE:
    file_storage_dir = os.path.dirname(instance.disks[0].logical_id[1])
    if not rpc.call_file_storage_dir_remove(instance.primary_node,
                                            file_storage_dir):
      logger.Error("could not remove directory '%s'" % file_storage_dir)
      all_ok = False

  return all_ok
2976 a8083063 Iustin Pop
2977 a8083063 Iustin Pop
2978 e2fe6369 Iustin Pop
def _ComputeDiskSize(disk_template, disk_size, swap_size):
  """Compute the free space required in the volume group.

  This is currently hard-coded for the two-drive (disk + swap) layout.
  Templates that do not use LVM map to None.

  """
  lvm_size = disk_size + swap_size
  # required free disk space per template; None means no LVM space needed
  requirements = {
    constants.DT_DISKLESS: None,
    constants.DT_PLAIN: lvm_size,
    # 256 MB are added for drbd metadata, 128MB for each drbd device
    constants.DT_DRBD8: lvm_size + 256,
    constants.DT_FILE: None,
  }

  if disk_template not in requirements:
    raise errors.ProgrammerError("Disk template '%s' size requirement"
                                 " is unknown" % disk_template)

  return requirements[disk_template]
2998 e2fe6369 Iustin Pop
2999 e2fe6369 Iustin Pop
3000 a8083063 Iustin Pop
class LUCreateInstance(LogicalUnit):
3001 a8083063 Iustin Pop
  """Create an instance.
3002 a8083063 Iustin Pop

3003 a8083063 Iustin Pop
  """
3004 a8083063 Iustin Pop
  HPATH = "instance-add"
3005 a8083063 Iustin Pop
  HTYPE = constants.HTYPE_INSTANCE
3006 538475ca Iustin Pop
  _OP_REQP = ["instance_name", "mem_size", "disk_size",
3007 a8083063 Iustin Pop
              "disk_template", "swap_size", "mode", "start", "vcpus",
3008 1862d460 Alexander Schreiber
              "wait_for_sync", "ip_check", "mac"]
3009 a8083063 Iustin Pop
3010 538475ca Iustin Pop
  def _RunAllocator(self):
3011 538475ca Iustin Pop
    """Run the allocator based on input opcode.
3012 538475ca Iustin Pop

3013 538475ca Iustin Pop
    """
3014 538475ca Iustin Pop
    disks = [{"size": self.op.disk_size, "mode": "w"},
3015 538475ca Iustin Pop
             {"size": self.op.swap_size, "mode": "w"}]
3016 538475ca Iustin Pop
    nics = [{"mac": self.op.mac, "ip": getattr(self.op, "ip", None),
3017 538475ca Iustin Pop
             "bridge": self.op.bridge}]
3018 d1c2dd75 Iustin Pop
    ial = IAllocator(self.cfg, self.sstore,
3019 29859cb7 Iustin Pop
                     mode=constants.IALLOCATOR_MODE_ALLOC,
3020 d1c2dd75 Iustin Pop
                     name=self.op.instance_name,
3021 d1c2dd75 Iustin Pop
                     disk_template=self.op.disk_template,
3022 d1c2dd75 Iustin Pop
                     tags=[],
3023 d1c2dd75 Iustin Pop
                     os=self.op.os_type,
3024 d1c2dd75 Iustin Pop
                     vcpus=self.op.vcpus,
3025 d1c2dd75 Iustin Pop
                     mem_size=self.op.mem_size,
3026 d1c2dd75 Iustin Pop
                     disks=disks,
3027 d1c2dd75 Iustin Pop
                     nics=nics,
3028 29859cb7 Iustin Pop
                     )
3029 d1c2dd75 Iustin Pop
3030 d1c2dd75 Iustin Pop
    ial.Run(self.op.iallocator)
3031 d1c2dd75 Iustin Pop
3032 d1c2dd75 Iustin Pop
    if not ial.success:
3033 538475ca Iustin Pop
      raise errors.OpPrereqError("Can't compute nodes using"
3034 538475ca Iustin Pop
                                 " iallocator '%s': %s" % (self.op.iallocator,
3035 d1c2dd75 Iustin Pop
                                                           ial.info))
3036 27579978 Iustin Pop
    if len(ial.nodes) != ial.required_nodes:
3037 538475ca Iustin Pop
      raise errors.OpPrereqError("iallocator '%s' returned invalid number"
3038 538475ca Iustin Pop
                                 " of nodes (%s), required %s" %
3039 27579978 Iustin Pop
                                 (len(ial.nodes), ial.required_nodes))
3040 d1c2dd75 Iustin Pop
    self.op.pnode = ial.nodes[0]
3041 538475ca Iustin Pop
    logger.ToStdout("Selected nodes for the instance: %s" %
3042 d1c2dd75 Iustin Pop
                    (", ".join(ial.nodes),))
3043 538475ca Iustin Pop
    logger.Info("Selected nodes for instance %s via iallocator %s: %s" %
3044 d1c2dd75 Iustin Pop
                (self.op.instance_name, self.op.iallocator, ial.nodes))
3045 27579978 Iustin Pop
    if ial.required_nodes == 2:
3046 d1c2dd75 Iustin Pop
      self.op.snode = ial.nodes[1]
3047 538475ca Iustin Pop
3048 a8083063 Iustin Pop
  def BuildHooksEnv(self):
3049 a8083063 Iustin Pop
    """Build hooks env.
3050 a8083063 Iustin Pop

3051 a8083063 Iustin Pop
    This runs on master, primary and secondary nodes of the instance.
3052 a8083063 Iustin Pop

3053 a8083063 Iustin Pop
    """
3054 a8083063 Iustin Pop
    env = {
3055 396e1b78 Michael Hanselmann
      "INSTANCE_DISK_TEMPLATE": self.op.disk_template,
3056 396e1b78 Michael Hanselmann
      "INSTANCE_DISK_SIZE": self.op.disk_size,
3057 396e1b78 Michael Hanselmann
      "INSTANCE_SWAP_SIZE": self.op.swap_size,
3058 a8083063 Iustin Pop
      "INSTANCE_ADD_MODE": self.op.mode,
3059 a8083063 Iustin Pop
      }
3060 a8083063 Iustin Pop
    if self.op.mode == constants.INSTANCE_IMPORT:
3061 396e1b78 Michael Hanselmann
      env["INSTANCE_SRC_NODE"] = self.op.src_node
3062 396e1b78 Michael Hanselmann
      env["INSTANCE_SRC_PATH"] = self.op.src_path
3063 396e1b78 Michael Hanselmann
      env["INSTANCE_SRC_IMAGE"] = self.src_image
3064 396e1b78 Michael Hanselmann
3065 396e1b78 Michael Hanselmann
    env.update(_BuildInstanceHookEnv(name=self.op.instance_name,
3066 396e1b78 Michael Hanselmann
      primary_node=self.op.pnode,
3067 396e1b78 Michael Hanselmann
      secondary_nodes=self.secondaries,
3068 396e1b78 Michael Hanselmann
      status=self.instance_status,
3069 ecb215b5 Michael Hanselmann
      os_type=self.op.os_type,
3070 396e1b78 Michael Hanselmann
      memory=self.op.mem_size,
3071 396e1b78 Michael Hanselmann
      vcpus=self.op.vcpus,
3072 c7b27e9e Iustin Pop
      nics=[(self.inst_ip, self.op.bridge, self.op.mac)],
3073 396e1b78 Michael Hanselmann
    ))
3074 a8083063 Iustin Pop
3075 880478f8 Iustin Pop
    nl = ([self.sstore.GetMasterNode(), self.op.pnode] +
3076 a8083063 Iustin Pop
          self.secondaries)
3077 a8083063 Iustin Pop
    return env, nl, nl
3078 a8083063 Iustin Pop
3079 a8083063 Iustin Pop
3080 a8083063 Iustin Pop
  def CheckPrereq(self):
3081 a8083063 Iustin Pop
    """Check prerequisites.
3082 a8083063 Iustin Pop

3083 a8083063 Iustin Pop
    """
3084 538475ca Iustin Pop
    # set optional parameters to none if they don't exist
3085 538475ca Iustin Pop
    for attr in ["kernel_path", "initrd_path", "hvm_boot_order", "pnode",
3086 31a853d2 Iustin Pop
                 "iallocator", "hvm_acpi", "hvm_pae", "hvm_cdrom_image_path",
3087 5397e0b7 Alexander Schreiber
                 "hvm_nic_type", "hvm_disk_type", "vnc_bind_address"]:
3088 40ed12dd Guido Trotter
      if not hasattr(self.op, attr):
3089 40ed12dd Guido Trotter
        setattr(self.op, attr, None)
3090 40ed12dd Guido Trotter
3091 a8083063 Iustin Pop
    if self.op.mode not in (constants.INSTANCE_CREATE,
3092 a8083063 Iustin Pop
                            constants.INSTANCE_IMPORT):
3093 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("Invalid instance creation mode '%s'" %
3094 3ecf6786 Iustin Pop
                                 self.op.mode)
3095 a8083063 Iustin Pop
3096 eedc99de Manuel Franceschini
    if (not self.cfg.GetVGName() and
3097 eedc99de Manuel Franceschini
        self.op.disk_template not in constants.DTS_NOT_LVM):
3098 eedc99de Manuel Franceschini
      raise errors.OpPrereqError("Cluster does not support lvm-based"
3099 eedc99de Manuel Franceschini
                                 " instances")
3100 eedc99de Manuel Franceschini
3101 a8083063 Iustin Pop
    if self.op.mode == constants.INSTANCE_IMPORT:
3102 a8083063 Iustin Pop
      src_node = getattr(self.op, "src_node", None)
3103 a8083063 Iustin Pop
      src_path = getattr(self.op, "src_path", None)
3104 a8083063 Iustin Pop
      if src_node is None or src_path is None:
3105 3ecf6786 Iustin Pop
        raise errors.OpPrereqError("Importing an instance requires source"
3106 3ecf6786 Iustin Pop
                                   " node and path options")
3107 a8083063 Iustin Pop
      src_node_full = self.cfg.ExpandNodeName(src_node)
3108 a8083063 Iustin Pop
      if src_node_full is None:
3109 3ecf6786 Iustin Pop
        raise errors.OpPrereqError("Unknown source node '%s'" % src_node)
3110 a8083063 Iustin Pop
      self.op.src_node = src_node = src_node_full
3111 a8083063 Iustin Pop
3112 a8083063 Iustin Pop
      if not os.path.isabs(src_path):
3113 3ecf6786 Iustin Pop
        raise errors.OpPrereqError("The source path must be absolute")
3114 a8083063 Iustin Pop
3115 a8083063 Iustin Pop
      export_info = rpc.call_export_info(src_node, src_path)
3116 a8083063 Iustin Pop
3117 a8083063 Iustin Pop
      if not export_info:
3118 3ecf6786 Iustin Pop
        raise errors.OpPrereqError("No export found in dir %s" % src_path)
3119 a8083063 Iustin Pop
3120 a8083063 Iustin Pop
      if not export_info.has_section(constants.INISECT_EXP):
3121 3ecf6786 Iustin Pop
        raise errors.ProgrammerError("Corrupted export config")
3122 a8083063 Iustin Pop
3123 a8083063 Iustin Pop
      ei_version = export_info.get(constants.INISECT_EXP, 'version')
3124 a8083063 Iustin Pop
      if (int(ei_version) != constants.EXPORT_VERSION):
3125 3ecf6786 Iustin Pop
        raise errors.OpPrereqError("Wrong export version %s (wanted %d)" %
3126 3ecf6786 Iustin Pop
                                   (ei_version, constants.EXPORT_VERSION))
3127 a8083063 Iustin Pop
3128 a8083063 Iustin Pop
      if int(export_info.get(constants.INISECT_INS, 'disk_count')) > 1:
3129 3ecf6786 Iustin Pop
        raise errors.OpPrereqError("Can't import instance with more than"
3130 3ecf6786 Iustin Pop
                                   " one data disk")
3131 a8083063 Iustin Pop
3132 a8083063 Iustin Pop
      # FIXME: are the old os-es, disk sizes, etc. useful?
3133 a8083063 Iustin Pop
      self.op.os_type = export_info.get(constants.INISECT_EXP, 'os')
3134 a8083063 Iustin Pop
      diskimage = os.path.join(src_path, export_info.get(constants.INISECT_INS,
3135 a8083063 Iustin Pop
                                                         'disk0_dump'))
3136 a8083063 Iustin Pop
      self.src_image = diskimage
3137 a8083063 Iustin Pop
    else: # INSTANCE_CREATE
3138 a8083063 Iustin Pop
      if getattr(self.op, "os_type", None) is None:
3139 3ecf6786 Iustin Pop
        raise errors.OpPrereqError("No guest OS specified")
3140 a8083063 Iustin Pop
3141 901a65c1 Iustin Pop
    #### instance parameters check
3142 901a65c1 Iustin Pop
3143 a8083063 Iustin Pop
    # disk template and mirror node verification
3144 a8083063 Iustin Pop
    if self.op.disk_template not in constants.DISK_TEMPLATES:
3145 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("Invalid disk template name")
3146 a8083063 Iustin Pop
3147 901a65c1 Iustin Pop
    # instance name verification
3148 901a65c1 Iustin Pop
    hostname1 = utils.HostInfo(self.op.instance_name)
3149 901a65c1 Iustin Pop
3150 901a65c1 Iustin Pop
    self.op.instance_name = instance_name = hostname1.name
3151 901a65c1 Iustin Pop
    instance_list = self.cfg.GetInstanceList()
3152 901a65c1 Iustin Pop
    if instance_name in instance_list:
3153 901a65c1 Iustin Pop
      raise errors.OpPrereqError("Instance '%s' is already in the cluster" %
3154 901a65c1 Iustin Pop
                                 instance_name)
3155 901a65c1 Iustin Pop
3156 901a65c1 Iustin Pop
    # ip validity checks
3157 901a65c1 Iustin Pop
    ip = getattr(self.op, "ip", None)
3158 901a65c1 Iustin Pop
    if ip is None or ip.lower() == "none":
3159 901a65c1 Iustin Pop
      inst_ip = None
3160 901a65c1 Iustin Pop
    elif ip.lower() == "auto":
3161 901a65c1 Iustin Pop
      inst_ip = hostname1.ip
3162 901a65c1 Iustin Pop
    else:
3163 901a65c1 Iustin Pop
      if not utils.IsValidIP(ip):
3164 901a65c1 Iustin Pop
        raise errors.OpPrereqError("given IP address '%s' doesn't look"
3165 901a65c1 Iustin Pop
                                   " like a valid IP" % ip)
3166 901a65c1 Iustin Pop
      inst_ip = ip
3167 901a65c1 Iustin Pop
    self.inst_ip = self.op.ip = inst_ip
3168 901a65c1 Iustin Pop
3169 901a65c1 Iustin Pop
    if self.op.start and not self.op.ip_check:
3170 901a65c1 Iustin Pop
      raise errors.OpPrereqError("Cannot ignore IP address conflicts when"
3171 901a65c1 Iustin Pop
                                 " adding an instance in start mode")
3172 901a65c1 Iustin Pop
3173 901a65c1 Iustin Pop
    if self.op.ip_check:
3174 901a65c1 Iustin Pop
      if utils.TcpPing(hostname1.ip, constants.DEFAULT_NODED_PORT):
3175 901a65c1 Iustin Pop
        raise errors.OpPrereqError("IP %s of instance %s already in use" %
3176 901a65c1 Iustin Pop
                                   (hostname1.ip, instance_name))
3177 901a65c1 Iustin Pop
3178 901a65c1 Iustin Pop
    # MAC address verification
3179 901a65c1 Iustin Pop
    if self.op.mac != "auto":
3180 901a65c1 Iustin Pop
      if not utils.IsValidMac(self.op.mac.lower()):
3181 901a65c1 Iustin Pop
        raise errors.OpPrereqError("invalid MAC address specified: %s" %
3182 901a65c1 Iustin Pop
                                   self.op.mac)
3183 901a65c1 Iustin Pop
3184 901a65c1 Iustin Pop
    # bridge verification
3185 901a65c1 Iustin Pop
    bridge = getattr(self.op, "bridge", None)
3186 901a65c1 Iustin Pop
    if bridge is None:
3187 901a65c1 Iustin Pop
      self.op.bridge = self.cfg.GetDefBridge()
3188 901a65c1 Iustin Pop
    else:
3189 901a65c1 Iustin Pop
      self.op.bridge = bridge
3190 901a65c1 Iustin Pop
3191 901a65c1 Iustin Pop
    # boot order verification
3192 901a65c1 Iustin Pop
    if self.op.hvm_boot_order is not None:
3193 901a65c1 Iustin Pop
      if len(self.op.hvm_boot_order.strip("acdn")) != 0:
3194 901a65c1 Iustin Pop
        raise errors.OpPrereqError("invalid boot order specified,"
3195 901a65c1 Iustin Pop
                                   " must be one or more of [acdn]")
3196 901a65c1 Iustin Pop
    # file storage checks
3197 0f1a06e3 Manuel Franceschini
    if (self.op.file_driver and
3198 0f1a06e3 Manuel Franceschini
        not self.op.file_driver in constants.FILE_DRIVER):
3199 0f1a06e3 Manuel Franceschini
      raise errors.OpPrereqError("Invalid file driver name '%s'" %
3200 0f1a06e3 Manuel Franceschini
                                 self.op.file_driver)
3201 0f1a06e3 Manuel Franceschini
3202 0f1a06e3 Manuel Franceschini
    if self.op.file_storage_dir and os.path.isabs(self.op.file_storage_dir):
3203 b4de68a9 Iustin Pop
      raise errors.OpPrereqError("File storage directory not a relative"
3204 b4de68a9 Iustin Pop
                                 " path")
3205 538475ca Iustin Pop
    #### allocator run
3206 538475ca Iustin Pop
3207 538475ca Iustin Pop
    if [self.op.iallocator, self.op.pnode].count(None) != 1:
3208 538475ca Iustin Pop
      raise errors.OpPrereqError("One and only one of iallocator and primary"
3209 538475ca Iustin Pop
                                 " node must be given")
3210 538475ca Iustin Pop
3211 538475ca Iustin Pop
    if self.op.iallocator is not None:
3212 538475ca Iustin Pop
      self._RunAllocator()
3213 0f1a06e3 Manuel Franceschini
3214 901a65c1 Iustin Pop
    #### node related checks
3215 901a65c1 Iustin Pop
3216 901a65c1 Iustin Pop
    # check primary node
3217 901a65c1 Iustin Pop
    pnode = self.cfg.GetNodeInfo(self.cfg.ExpandNodeName(self.op.pnode))
3218 901a65c1 Iustin Pop
    if pnode is None:
3219 901a65c1 Iustin Pop
      raise errors.OpPrereqError("Primary node '%s' is unknown" %
3220 901a65c1 Iustin Pop
                                 self.op.pnode)
3221 901a65c1 Iustin Pop
    self.op.pnode = pnode.name
3222 901a65c1 Iustin Pop
    self.pnode = pnode
3223 901a65c1 Iustin Pop
    self.secondaries = []
3224 901a65c1 Iustin Pop
3225 901a65c1 Iustin Pop
    # mirror node verification
3226 a1f445d3 Iustin Pop
    if self.op.disk_template in constants.DTS_NET_MIRROR:
3227 a8083063 Iustin Pop
      if getattr(self.op, "snode", None) is None:
3228 a1f445d3 Iustin Pop
        raise errors.OpPrereqError("The networked disk templates need"
3229 3ecf6786 Iustin Pop
                                   " a mirror node")
3230 a8083063 Iustin Pop
3231 a8083063 Iustin Pop
      snode_name = self.cfg.ExpandNodeName(self.op.snode)
3232 a8083063 Iustin Pop
      if snode_name is None:
3233 3ecf6786 Iustin Pop
        raise errors.OpPrereqError("Unknown secondary node '%s'" %
3234 3ecf6786 Iustin Pop
                                   self.op.snode)
3235 a8083063 Iustin Pop
      elif snode_name == pnode.name:
3236 3ecf6786 Iustin Pop
        raise errors.OpPrereqError("The secondary node cannot be"
3237 3ecf6786 Iustin Pop
                                   " the primary node.")
3238 a8083063 Iustin Pop
      self.secondaries.append(snode_name)
3239 a8083063 Iustin Pop
3240 e2fe6369 Iustin Pop
    req_size = _ComputeDiskSize(self.op.disk_template,
3241 e2fe6369 Iustin Pop
                                self.op.disk_size, self.op.swap_size)
3242 ed1ebc60 Guido Trotter
3243 8d75db10 Iustin Pop
    # Check lv size requirements
3244 8d75db10 Iustin Pop
    if req_size is not None:
3245 8d75db10 Iustin Pop
      nodenames = [pnode.name] + self.secondaries
3246 8d75db10 Iustin Pop
      nodeinfo = rpc.call_node_info(nodenames, self.cfg.GetVGName())
3247 8d75db10 Iustin Pop
      for node in nodenames:
3248 8d75db10 Iustin Pop
        info = nodeinfo.get(node, None)
3249 8d75db10 Iustin Pop
        if not info:
3250 8d75db10 Iustin Pop
          raise errors.OpPrereqError("Cannot get current information"
3251 3e91897b Iustin Pop
                                     " from node '%s'" % node)
3252 8d75db10 Iustin Pop
        vg_free = info.get('vg_free', None)
3253 8d75db10 Iustin Pop
        if not isinstance(vg_free, int):
3254 8d75db10 Iustin Pop
          raise errors.OpPrereqError("Can't compute free disk space on"
3255 8d75db10 Iustin Pop
                                     " node %s" % node)
3256 8d75db10 Iustin Pop
        if req_size > info['vg_free']:
3257 8d75db10 Iustin Pop
          raise errors.OpPrereqError("Not enough disk space on target node %s."
3258 8d75db10 Iustin Pop
                                     " %d MB available, %d MB required" %
3259 8d75db10 Iustin Pop
                                     (node, info['vg_free'], req_size))
3260 ed1ebc60 Guido Trotter
3261 a8083063 Iustin Pop
    # os verification
3262 00fe9e38 Guido Trotter
    os_obj = rpc.call_os_get(pnode.name, self.op.os_type)
3263 dfa96ded Guido Trotter
    if not os_obj:
3264 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("OS '%s' not in supported os list for"
3265 3ecf6786 Iustin Pop
                                 " primary node"  % self.op.os_type)
3266 a8083063 Iustin Pop
3267 3b6d8c9b Iustin Pop
    if self.op.kernel_path == constants.VALUE_NONE:
3268 3b6d8c9b Iustin Pop
      raise errors.OpPrereqError("Can't set instance kernel to none")
3269 3b6d8c9b Iustin Pop
3270 a8083063 Iustin Pop
3271 901a65c1 Iustin Pop
    # bridge check on primary node
3272 a8083063 Iustin Pop
    if not rpc.call_bridges_exist(self.pnode.name, [self.op.bridge]):
3273 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("target bridge '%s' does not exist on"
3274 3ecf6786 Iustin Pop
                                 " destination node '%s'" %
3275 3ecf6786 Iustin Pop
                                 (self.op.bridge, pnode.name))
3276 a8083063 Iustin Pop
3277 49ce1563 Iustin Pop
    # memory check on primary node
3278 49ce1563 Iustin Pop
    if self.op.start:
3279 49ce1563 Iustin Pop
      _CheckNodeFreeMemory(self.cfg, self.pnode.name,
3280 49ce1563 Iustin Pop
                           "creating instance %s" % self.op.instance_name,
3281 49ce1563 Iustin Pop
                           self.op.mem_size)
3282 49ce1563 Iustin Pop
3283 31a853d2 Iustin Pop
    # hvm_cdrom_image_path verification
3284 31a853d2 Iustin Pop
    if self.op.hvm_cdrom_image_path is not None:
3285 31a853d2 Iustin Pop
      if not os.path.isabs(self.op.hvm_cdrom_image_path):
3286 31a853d2 Iustin Pop
        raise errors.OpPrereqError("The path to the HVM CDROM image must"
3287 31a853d2 Iustin Pop
                                   " be an absolute path or None, not %s" %
3288 31a853d2 Iustin Pop
                                   self.op.hvm_cdrom_image_path)
3289 31a853d2 Iustin Pop
      if not os.path.isfile(self.op.hvm_cdrom_image_path):
3290 31a853d2 Iustin Pop
        raise errors.OpPrereqError("The HVM CDROM image must either be a"
3291 31a853d2 Iustin Pop
                                   " regular file or a symlink pointing to"
3292 31a853d2 Iustin Pop
                                   " an existing regular file, not %s" %
3293 31a853d2 Iustin Pop
                                   self.op.hvm_cdrom_image_path)
3294 31a853d2 Iustin Pop
3295 31a853d2 Iustin Pop
    # vnc_bind_address verification
3296 31a853d2 Iustin Pop
    if self.op.vnc_bind_address is not None:
3297 31a853d2 Iustin Pop
      if not utils.IsValidIP(self.op.vnc_bind_address):
3298 31a853d2 Iustin Pop
        raise errors.OpPrereqError("given VNC bind address '%s' doesn't look"
3299 31a853d2 Iustin Pop
                                   " like a valid IP address" %
3300 31a853d2 Iustin Pop
                                   self.op.vnc_bind_address)
3301 31a853d2 Iustin Pop
3302 5397e0b7 Alexander Schreiber
    # Xen HVM device type checks
3303 5397e0b7 Alexander Schreiber
    if self.sstore.GetHypervisorType() == constants.HT_XEN_HVM31:
3304 5397e0b7 Alexander Schreiber
      if self.op.hvm_nic_type not in constants.HT_HVM_VALID_NIC_TYPES:
3305 5397e0b7 Alexander Schreiber
        raise errors.OpPrereqError("Invalid NIC type %s specified for Xen HVM"
3306 5397e0b7 Alexander Schreiber
                                   " hypervisor" % self.op.hvm_nic_type)
3307 5397e0b7 Alexander Schreiber
      if self.op.hvm_disk_type not in constants.HT_HVM_VALID_DISK_TYPES:
3308 5397e0b7 Alexander Schreiber
        raise errors.OpPrereqError("Invalid disk type %s specified for Xen HVM"
3309 5397e0b7 Alexander Schreiber
                                   " hypervisor" % self.op.hvm_disk_type)
3310 5397e0b7 Alexander Schreiber
3311 a8083063 Iustin Pop
    if self.op.start:
3312 a8083063 Iustin Pop
      self.instance_status = 'up'
3313 a8083063 Iustin Pop
    else:
3314 a8083063 Iustin Pop
      self.instance_status = 'down'
3315 a8083063 Iustin Pop
3316 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
    """Create and add the instance to the cluster.

    Builds the NIC and disk objects, creates the disks on the node(s),
    registers the instance in the configuration and in the lock
    manager, waits for the disks to sync, runs the OS creation or
    import scripts and finally (if requested) starts the instance.

    Args:
      feedback_fn: callable used to report progress back to the caller

    Raises:
      errors.OpExecError: if disk creation, disk sync, OS setup or the
        instance start fails; on disk failures the already-created
        disks are removed before raising

    """
    instance = self.op.instance_name
    pnode_name = self.pnode.name

    # MAC: either cluster-generated or taken verbatim from the opcode
    # (validity of an explicit MAC was checked in CheckPrereq)
    if self.op.mac == "auto":
      mac_address = self.cfg.GenerateMAC()
    else:
      mac_address = self.op.mac

    nic = objects.NIC(bridge=self.op.bridge, mac=mac_address)
    if self.inst_ip is not None:
      nic.ip = self.inst_ip

    # some hypervisor types need a cluster-allocated network port
    # (e.g. for VNC); others get no port at all
    ht_kind = self.sstore.GetHypervisorType()
    if ht_kind in constants.HTS_REQ_PORT:
      network_port = self.cfg.AllocatePort()
    else:
      network_port = None

    if self.op.vnc_bind_address is None:
      self.op.vnc_bind_address = constants.VNC_DEFAULT_BIND_ADDRESS

    # this is needed because os.path.join does not accept None arguments
    if self.op.file_storage_dir is None:
      string_file_storage_dir = ""
    else:
      string_file_storage_dir = self.op.file_storage_dir

    # build the full file storage dir path
    file_storage_dir = os.path.normpath(os.path.join(
                                        self.sstore.GetFileStorageDir(),
                                        string_file_storage_dir, instance))


    disks = _GenerateDiskTemplate(self.cfg,
                                  self.op.disk_template,
                                  instance, pnode_name,
                                  self.secondaries, self.op.disk_size,
                                  self.op.swap_size,
                                  file_storage_dir,
                                  self.op.file_driver)

    iobj = objects.Instance(name=instance, os=self.op.os_type,
                            primary_node=pnode_name,
                            memory=self.op.mem_size,
                            vcpus=self.op.vcpus,
                            nics=[nic], disks=disks,
                            disk_template=self.op.disk_template,
                            status=self.instance_status,
                            network_port=network_port,
                            kernel_path=self.op.kernel_path,
                            initrd_path=self.op.initrd_path,
                            hvm_boot_order=self.op.hvm_boot_order,
                            hvm_acpi=self.op.hvm_acpi,
                            hvm_pae=self.op.hvm_pae,
                            hvm_cdrom_image_path=self.op.hvm_cdrom_image_path,
                            vnc_bind_address=self.op.vnc_bind_address,
                            hvm_nic_type=self.op.hvm_nic_type,
                            hvm_disk_type=self.op.hvm_disk_type,
                            )

    feedback_fn("* creating instance disks...")
    if not _CreateDisks(self.cfg, iobj):
      # roll back any disks that were created before the failure
      _RemoveDisks(iobj, self.cfg)
      raise errors.OpExecError("Device creation failed, reverting...")

    feedback_fn("adding instance %s to cluster config" % instance)

    self.cfg.AddInstance(iobj)
    # Add the new instance to the Ganeti Lock Manager
    self.context.glm.add(locking.LEVEL_INSTANCE, instance)

    if self.op.wait_for_sync:
      disk_abort = not _WaitForSync(self.cfg, iobj, self.proc)
    elif iobj.disk_template in constants.DTS_NET_MIRROR:
      # make sure the disks are not degraded (still sync-ing is ok)
      time.sleep(15)
      feedback_fn("* checking mirrors status")
      disk_abort = not _WaitForSync(self.cfg, iobj, self.proc, oneshot=True)
    else:
      disk_abort = False

    if disk_abort:
      # degraded disks: undo both the disk creation and the config /
      # lock manager registration done above
      _RemoveDisks(iobj, self.cfg)
      self.cfg.RemoveInstance(iobj.name)
      # Remove the new instance from the Ganeti Lock Manager
      self.context.glm.remove(locking.LEVEL_INSTANCE, iobj.name)
      raise errors.OpExecError("There are some degraded disks for"
                               " this instance")

    feedback_fn("creating os for instance %s on node %s" %
                (instance, pnode_name))

    if iobj.disk_template != constants.DT_DISKLESS:
      if self.op.mode == constants.INSTANCE_CREATE:
        feedback_fn("* running the instance OS create scripts...")
        if not rpc.call_instance_os_add(pnode_name, iobj, "sda", "sdb"):
          raise errors.OpExecError("could not add os for instance %s"
                                   " on node %s" %
                                   (instance, pnode_name))

      elif self.op.mode == constants.INSTANCE_IMPORT:
        feedback_fn("* running the instance OS import scripts...")
        src_node = self.op.src_node
        src_image = self.src_image
        if not rpc.call_instance_os_import(pnode_name, iobj, "sda", "sdb",
                                                src_node, src_image):
          raise errors.OpExecError("Could not import os for instance"
                                   " %s on node %s" %
                                   (instance, pnode_name))
      else:
        # also checked in the prereq part
        raise errors.ProgrammerError("Unknown OS initialization mode '%s'"
                                     % self.op.mode)

    if self.op.start:
      logger.Info("starting instance %s on node %s" % (instance, pnode_name))
      feedback_fn("* starting instance...")
      if not rpc.call_instance_start(pnode_name, iobj, None):
        raise errors.OpExecError("Could not start instance")
3439 a8083063 Iustin Pop
3440 a8083063 Iustin Pop
3441 a8083063 Iustin Pop
class LUConnectConsole(NoHooksLU):
  """Compute the command needed to connect to an instance's console.

  Unlike most logical units, this one does not perform the connection
  itself: it only returns the ssh command line that must be executed
  on the master node in order to attach to the console.

  """
  _OP_REQP = ["instance_name"]
  REQ_BGL = False

  def ExpandNames(self):
    self._ExpandAndLockInstance()

  def CheckPrereq(self):
    """Check prerequisites.

    The instance must still exist in the configuration; it was already
    locked during ExpandNames.

    """
    self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
    assert self.instance is not None, \
      "Cannot retrieve locked instance %s" % self.op.instance_name

  def Exec(self, feedback_fn):
    """Build and return the console ssh command line.

    """
    inst = self.instance
    pnode = inst.primary_node

    running = rpc.call_instance_list([pnode])[pnode]
    if running is False:
      raise errors.OpExecError("Can't connect to node %s." % pnode)
    if inst.name not in running:
      raise errors.OpExecError("Instance %s is not running." % inst.name)

    logger.Debug("connecting to console of %s on %s" % (inst.name, pnode))

    console_cmd = hypervisor.GetHypervisor().GetShellCommandForConsole(inst)

    # the caller runs this command on the master node via ssh
    return self.ssh.BuildCmd(pnode, "root", console_cmd, batch=True, tty=True)
3486 a8083063 Iustin Pop
3487 a8083063 Iustin Pop
3488 a8083063 Iustin Pop
class LUReplaceDisks(LogicalUnit):
3489 a8083063 Iustin Pop
  """Replace the disks of an instance.
3490 a8083063 Iustin Pop

3491 a8083063 Iustin Pop
  """
3492 a8083063 Iustin Pop
  HPATH = "mirrors-replace"
3493 a8083063 Iustin Pop
  HTYPE = constants.HTYPE_INSTANCE
3494 a9e0c397 Iustin Pop
  _OP_REQP = ["instance_name", "mode", "disks"]
3495 a8083063 Iustin Pop
3496 b6e82a65 Iustin Pop
  def _RunAllocator(self):
    """Compute a new secondary node using an IAllocator.

    Runs the configured instance allocator in relocation mode for this
    instance and stores the selected node in self.op.remote_node.

    Raises:
      errors.OpPrereqError: if the allocator run fails or returns an
        unexpected number of nodes

    """
    ial = IAllocator(self.cfg, self.sstore,
                     mode=constants.IALLOCATOR_MODE_RELOC,
                     name=self.op.instance_name,
                     relocate_from=[self.sec_node])

    ial.Run(self.op.iallocator)

    if not ial.success:
      raise errors.OpPrereqError("Can't compute nodes using"
                                 " iallocator '%s': %s" % (self.op.iallocator,
                                                           ial.info))
    if len(ial.nodes) != ial.required_nodes:
      # FIX: the format string has three placeholders but only two
      # values were supplied, so hitting this path raised TypeError
      # instead of the intended OpPrereqError; pass the allocator name
      # as the first argument, matching the message above
      raise errors.OpPrereqError("iallocator '%s' returned invalid number"
                                 " of nodes (%s), required %s" %
                                 (self.op.iallocator, len(ial.nodes),
                                  ial.required_nodes))
    self.op.remote_node = ial.nodes[0]
    logger.ToStdout("Selected new secondary for the instance: %s" %
                    self.op.remote_node)
3518 b6e82a65 Iustin Pop
3519 a8083063 Iustin Pop
  def BuildHooksEnv(self):
    """Build the hooks environment for disk replacement.

    The hooks run on the master node, the instance's primary node and,
    when a new secondary was requested, on that node as well.

    """
    env = dict(MODE=self.op.mode,
               NEW_SECONDARY=self.op.remote_node,
               OLD_SECONDARY=self.instance.secondary_nodes[0])
    env.update(_BuildInstanceHookEnvByObject(self.instance))

    node_list = [self.sstore.GetMasterNode(), self.instance.primary_node]
    if self.op.remote_node is not None:
      node_list.append(self.op.remote_node)
    return env, node_list, node_list
3538 a8083063 Iustin Pop
3539 a8083063 Iustin Pop
  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance exists, uses a network-mirrored disk
    template and has exactly one secondary node; resolves (or computes
    via the iallocator) the optional new secondary node; and, for
    DRBD8, pre-computes the target/other nodes for the requested
    replacement mode.

    Raises:
      errors.OpPrereqError: on any failed check

    """
    if not hasattr(self.op, "remote_node"):
      self.op.remote_node = None

    instance = self.cfg.GetInstanceInfo(
      self.cfg.ExpandInstanceName(self.op.instance_name))
    if instance is None:
      raise errors.OpPrereqError("Instance '%s' not known" %
                                 self.op.instance_name)
    self.instance = instance
    self.op.instance_name = instance.name

    if instance.disk_template not in constants.DTS_NET_MIRROR:
      raise errors.OpPrereqError("Instance's disk layout is not"
                                 " network mirrored.")

    if len(instance.secondary_nodes) != 1:
      raise errors.OpPrereqError("The instance has a strange layout,"
                                 " expected one secondary but found %d" %
                                 len(instance.secondary_nodes))

    self.sec_node = instance.secondary_nodes[0]

    ia_name = getattr(self.op, "iallocator", None)
    if ia_name is not None:
      if self.op.remote_node is not None:
        raise errors.OpPrereqError("Give either the iallocator or the new"
                                   " secondary, not both")
      # FIX: _RunAllocator stores its choice in self.op.remote_node and
      # returns None; the previous "self.op.remote_node =
      # self._RunAllocator()" therefore clobbered the allocator's
      # result with None (cf. the plain call in LUCreateInstance)
      self._RunAllocator()

    remote_node = self.op.remote_node
    if remote_node is not None:
      remote_node = self.cfg.ExpandNodeName(remote_node)
      if remote_node is None:
        raise errors.OpPrereqError("Node '%s' not known" %
                                   self.op.remote_node)
      self.remote_node_info = self.cfg.GetNodeInfo(remote_node)
    else:
      self.remote_node_info = None
    if remote_node == instance.primary_node:
      raise errors.OpPrereqError("The specified node is the primary node of"
                                 " the instance.")
    elif remote_node == self.sec_node:
      if self.op.mode == constants.REPLACE_DISK_SEC:
        # this is for DRBD8, where we can't execute the same mode of
        # replacement as for drbd7 (no different port allocated)
        raise errors.OpPrereqError("Same secondary given, cannot execute"
                                   " replacement")
    if instance.disk_template == constants.DT_DRBD8:
      if (self.op.mode == constants.REPLACE_DISK_ALL and
          remote_node is not None):
        # switch to replace secondary mode
        self.op.mode = constants.REPLACE_DISK_SEC

      if self.op.mode == constants.REPLACE_DISK_ALL:
        raise errors.OpPrereqError("Template 'drbd' only allows primary or"
                                   " secondary disk replacement, not"
                                   " both at once")
      elif self.op.mode == constants.REPLACE_DISK_PRI:
        if remote_node is not None:
          raise errors.OpPrereqError("Template 'drbd' does not allow changing"
                                     " the secondary while doing a primary"
                                     " node disk replacement")
        self.tgt_node = instance.primary_node
        self.oth_node = instance.secondary_nodes[0]
      elif self.op.mode == constants.REPLACE_DISK_SEC:
        self.new_node = remote_node # this can be None, in which case
                                    # we don't change the secondary
        self.tgt_node = instance.secondary_nodes[0]
        self.oth_node = instance.primary_node
      else:
        raise errors.ProgrammerError("Unhandled disk replace mode")

    # all disks named in the opcode must actually exist on the instance
    for name in self.op.disks:
      if instance.FindDisk(name) is None:
        raise errors.OpPrereqError("Disk '%s' not found for instance '%s'" %
                                   (name, instance.name))
    self.op.remote_node = remote_node
3622 a8083063 Iustin Pop
3623 a9e0c397 Iustin Pop
  def _ExecD8DiskOnly(self, feedback_fn):
3624 a9e0c397 Iustin Pop
    """Replace a disk on the primary or secondary for dbrd8.
3625 a9e0c397 Iustin Pop

3626 a9e0c397 Iustin Pop
    The algorithm for replace is quite complicated:
3627 a9e0c397 Iustin Pop
      - for each disk to be replaced:
3628 a9e0c397 Iustin Pop
        - create new LVs on the target node with unique names
3629 a9e0c397 Iustin Pop
        - detach old LVs from the drbd device
3630 a9e0c397 Iustin Pop
        - rename old LVs to name_replaced.<time_t>
3631 a9e0c397 Iustin Pop
        - rename new LVs to old LVs
3632 a9e0c397 Iustin Pop
        - attach the new LVs (with the old names now) to the drbd device
3633 a9e0c397 Iustin Pop
      - wait for sync across all devices
3634 a9e0c397 Iustin Pop
      - for each modified disk:
3635 a9e0c397 Iustin Pop
        - remove old LVs (which have the name name_replaces.<time_t>)
3636 a9e0c397 Iustin Pop

3637 a9e0c397 Iustin Pop
    Failures are not very well handled.
3638 cff90b79 Iustin Pop

3639 a9e0c397 Iustin Pop
    """
3640 cff90b79 Iustin Pop
    steps_total = 6
3641 5bfac263 Iustin Pop
    warning, info = (self.proc.LogWarning, self.proc.LogInfo)
3642 a9e0c397 Iustin Pop
    instance = self.instance
3643 a9e0c397 Iustin Pop
    iv_names = {}
3644 a9e0c397 Iustin Pop
    vgname = self.cfg.GetVGName()
3645 a9e0c397 Iustin Pop
    # start of work
3646 a9e0c397 Iustin Pop
    cfg = self.cfg
3647 a9e0c397 Iustin Pop
    tgt_node = self.tgt_node
3648 cff90b79 Iustin Pop
    oth_node = self.oth_node
3649 cff90b79 Iustin Pop
3650 cff90b79 Iustin Pop
    # Step: check device activation
3651 5bfac263 Iustin Pop
    self.proc.LogStep(1, steps_total, "check device existence")
3652 cff90b79 Iustin Pop
    info("checking volume groups")
3653 cff90b79 Iustin Pop
    my_vg = cfg.GetVGName()
3654 cff90b79 Iustin Pop
    results = rpc.call_vg_list([oth_node, tgt_node])
3655 cff90b79 Iustin Pop
    if not results:
3656 cff90b79 Iustin Pop
      raise errors.OpExecError("Can't list volume groups on the nodes")
3657 cff90b79 Iustin Pop
    for node in oth_node, tgt_node:
3658 cff90b79 Iustin Pop
      res = results.get(node, False)
3659 cff90b79 Iustin Pop
      if not res or my_vg not in res:
3660 cff90b79 Iustin Pop
        raise errors.OpExecError("Volume group '%s' not found on %s" %
3661 cff90b79 Iustin Pop
                                 (my_vg, node))
3662 cff90b79 Iustin Pop
    for dev in instance.disks:
3663 cff90b79 Iustin Pop
      if not dev.iv_name in self.op.disks:
3664 cff90b79 Iustin Pop
        continue
3665 cff90b79 Iustin Pop
      for node in tgt_node, oth_node:
3666 cff90b79 Iustin Pop
        info("checking %s on %s" % (dev.iv_name, node))
3667 cff90b79 Iustin Pop
        cfg.SetDiskID(dev, node)
3668 cff90b79 Iustin Pop
        if not rpc.call_blockdev_find(node, dev):
3669 cff90b79 Iustin Pop
          raise errors.OpExecError("Can't find device %s on node %s" %
3670 cff90b79 Iustin Pop
                                   (dev.iv_name, node))
3671 cff90b79 Iustin Pop
3672 cff90b79 Iustin Pop
    # Step: check other node consistency
3673 5bfac263 Iustin Pop
    self.proc.LogStep(2, steps_total, "check peer consistency")
3674 cff90b79 Iustin Pop
    for dev in instance.disks:
3675 cff90b79 Iustin Pop
      if not dev.iv_name in self.op.disks:
3676 cff90b79 Iustin Pop
        continue
3677 cff90b79 Iustin Pop
      info("checking %s consistency on %s" % (dev.iv_name, oth_node))
3678 cff90b79 Iustin Pop
      if not _CheckDiskConsistency(self.cfg, dev, oth_node,
3679 cff90b79 Iustin Pop
                                   oth_node==instance.primary_node):
3680 cff90b79 Iustin Pop
        raise errors.OpExecError("Peer node (%s) has degraded storage, unsafe"
3681 cff90b79 Iustin Pop
                                 " to replace disks on this node (%s)" %
3682 cff90b79 Iustin Pop
                                 (oth_node, tgt_node))
3683 cff90b79 Iustin Pop
3684 cff90b79 Iustin Pop
    # Step: create new storage
3685 5bfac263 Iustin Pop
    self.proc.LogStep(3, steps_total, "allocate new storage")
3686 a9e0c397 Iustin Pop
    for dev in instance.disks:
3687 a9e0c397 Iustin Pop
      if not dev.iv_name in self.op.disks:
3688 a9e0c397 Iustin Pop
        continue
3689 a9e0c397 Iustin Pop
      size = dev.size
3690 a9e0c397 Iustin Pop
      cfg.SetDiskID(dev, tgt_node)
3691 a9e0c397 Iustin Pop
      lv_names = [".%s_%s" % (dev.iv_name, suf) for suf in ["data", "meta"]]
3692 a9e0c397 Iustin Pop
      names = _GenerateUniqueNames(cfg, lv_names)
3693 a9e0c397 Iustin Pop
      lv_data = objects.Disk(dev_type=constants.LD_LV, size=size,
3694 a9e0c397 Iustin Pop
                             logical_id=(vgname, names[0]))
3695 a9e0c397 Iustin Pop
      lv_meta = objects.Disk(dev_type=constants.LD_LV, size=128,
3696 a9e0c397 Iustin Pop
                             logical_id=(vgname, names[1]))
3697 a9e0c397 Iustin Pop
      new_lvs = [lv_data, lv_meta]
3698 a9e0c397 Iustin Pop
      old_lvs = dev.children
3699 a9e0c397 Iustin Pop
      iv_names[dev.iv_name] = (dev, old_lvs, new_lvs)
3700 cff90b79 Iustin Pop
      info("creating new local storage on %s for %s" %
3701 cff90b79 Iustin Pop
           (tgt_node, dev.iv_name))
3702 a9e0c397 Iustin Pop
      # since we *always* want to create this LV, we use the
3703 a9e0c397 Iustin Pop
      # _Create...OnPrimary (which forces the creation), even if we
3704 a9e0c397 Iustin Pop
      # are talking about the secondary node
3705 a9e0c397 Iustin Pop
      for new_lv in new_lvs:
3706 3f78eef2 Iustin Pop
        if not _CreateBlockDevOnPrimary(cfg, tgt_node, instance, new_lv,
3707 a9e0c397 Iustin Pop
                                        _GetInstanceInfoText(instance)):
3708 a9e0c397 Iustin Pop
          raise errors.OpExecError("Failed to create new LV named '%s' on"
3709 a9e0c397 Iustin Pop
                                   " node '%s'" %
3710 a9e0c397 Iustin Pop
                                   (new_lv.logical_id[1], tgt_node))
3711 a9e0c397 Iustin Pop
3712 cff90b79 Iustin Pop
    # Step: for each lv, detach+rename*2+attach
3713 5bfac263 Iustin Pop
    self.proc.LogStep(4, steps_total, "change drbd configuration")
3714 cff90b79 Iustin Pop
    for dev, old_lvs, new_lvs in iv_names.itervalues():
3715 cff90b79 Iustin Pop
      info("detaching %s drbd from local storage" % dev.iv_name)
3716 a9e0c397 Iustin Pop
      if not rpc.call_blockdev_removechildren(tgt_node, dev, old_lvs):
3717 a9e0c397 Iustin Pop
        raise errors.OpExecError("Can't detach drbd from local storage on node"
3718 a9e0c397 Iustin Pop
                                 " %s for device %s" % (tgt_node, dev.iv_name))
3719 cff90b79 Iustin Pop
      #dev.children = []
3720 cff90b79 Iustin Pop
      #cfg.Update(instance)
3721 a9e0c397 Iustin Pop
3722 a9e0c397 Iustin Pop
      # ok, we created the new LVs, so now we know we have the needed
3723 a9e0c397 Iustin Pop
      # storage; as such, we proceed on the target node to rename
3724 a9e0c397 Iustin Pop
      # old_lv to _old, and new_lv to old_lv; note that we rename LVs
3725 c99a3cc0 Manuel Franceschini
      # using the assumption that logical_id == physical_id (which in
3726 a9e0c397 Iustin Pop
      # turn is the unique_id on that node)
3727 cff90b79 Iustin Pop
3728 cff90b79 Iustin Pop
      # FIXME(iustin): use a better name for the replaced LVs
3729 a9e0c397 Iustin Pop
      temp_suffix = int(time.time())
3730 a9e0c397 Iustin Pop
      ren_fn = lambda d, suff: (d.physical_id[0],
3731 a9e0c397 Iustin Pop
                                d.physical_id[1] + "_replaced-%s" % suff)
3732 cff90b79 Iustin Pop
      # build the rename list based on what LVs exist on the node
3733 cff90b79 Iustin Pop
      rlist = []
3734 cff90b79 Iustin Pop
      for to_ren in old_lvs:
3735 cff90b79 Iustin Pop
        find_res = rpc.call_blockdev_find(tgt_node, to_ren)
3736 cff90b79 Iustin Pop
        if find_res is not None: # device exists
3737 cff90b79 Iustin Pop
          rlist.append((to_ren, ren_fn(to_ren, temp_suffix)))
3738 cff90b79 Iustin Pop
3739 cff90b79 Iustin Pop
      info("renaming the old LVs on the target node")
3740 a9e0c397 Iustin Pop
      if not rpc.call_blockdev_rename(tgt_node, rlist):
3741 cff90b79 Iustin Pop
        raise errors.OpExecError("Can't rename old LVs on node %s" % tgt_node)
3742 a9e0c397 Iustin Pop
      # now we rename the new LVs to the old LVs
3743 cff90b79 Iustin Pop
      info("renaming the new LVs on the target node")
3744 a9e0c397 Iustin Pop
      rlist = [(new, old.physical_id) for old, new in zip(old_lvs, new_lvs)]
3745 a9e0c397 Iustin Pop
      if not rpc.call_blockdev_rename(tgt_node, rlist):
3746 cff90b79 Iustin Pop
        raise errors.OpExecError("Can't rename new LVs on node %s" % tgt_node)
3747 cff90b79 Iustin Pop
3748 cff90b79 Iustin Pop
      for old, new in zip(old_lvs, new_lvs):
3749 cff90b79 Iustin Pop
        new.logical_id = old.logical_id
3750 cff90b79 Iustin Pop
        cfg.SetDiskID(new, tgt_node)
3751 a9e0c397 Iustin Pop
3752 cff90b79 Iustin Pop
      for disk in old_lvs:
3753 cff90b79 Iustin Pop
        disk.logical_id = ren_fn(disk, temp_suffix)
3754 cff90b79 Iustin Pop
        cfg.SetDiskID(disk, tgt_node)
3755 a9e0c397 Iustin Pop
3756 a9e0c397 Iustin Pop
      # now that the new lvs have the old name, we can add them to the device
3757 cff90b79 Iustin Pop
      info("adding new mirror component on %s" % tgt_node)
3758 a9e0c397 Iustin Pop
      if not rpc.call_blockdev_addchildren(tgt_node, dev, new_lvs):
3759 a9e0c397 Iustin Pop
        for new_lv in new_lvs:
3760 a9e0c397 Iustin Pop
          if not rpc.call_blockdev_remove(tgt_node, new_lv):
3761 79caa9ed Guido Trotter
            warning("Can't rollback device %s", hint="manually cleanup unused"
3762 cff90b79 Iustin Pop
                    " logical volumes")
3763 cff90b79 Iustin Pop
        raise errors.OpExecError("Can't add local storage to drbd")
3764 a9e0c397 Iustin Pop
3765 a9e0c397 Iustin Pop
      dev.children = new_lvs
3766 a9e0c397 Iustin Pop
      cfg.Update(instance)
3767 a9e0c397 Iustin Pop
3768 cff90b79 Iustin Pop
    # Step: wait for sync
3769 a9e0c397 Iustin Pop
3770 a9e0c397 Iustin Pop
    # this can fail as the old devices are degraded and _WaitForSync
3771 a9e0c397 Iustin Pop
    # does a combined result over all disks, so we don't check its
3772 a9e0c397 Iustin Pop
    # return value
3773 5bfac263 Iustin Pop
    self.proc.LogStep(5, steps_total, "sync devices")
3774 5bfac263 Iustin Pop
    _WaitForSync(cfg, instance, self.proc, unlock=True)
3775 a9e0c397 Iustin Pop
3776 a9e0c397 Iustin Pop
    # so check manually all the devices
3777 a9e0c397 Iustin Pop
    for name, (dev, old_lvs, new_lvs) in iv_names.iteritems():
3778 a9e0c397 Iustin Pop
      cfg.SetDiskID(dev, instance.primary_node)
3779 a9e0c397 Iustin Pop
      is_degr = rpc.call_blockdev_find(instance.primary_node, dev)[5]
3780 a9e0c397 Iustin Pop
      if is_degr:
3781 a9e0c397 Iustin Pop
        raise errors.OpExecError("DRBD device %s is degraded!" % name)
3782 a9e0c397 Iustin Pop
3783 cff90b79 Iustin Pop
    # Step: remove old storage
3784 5bfac263 Iustin Pop
    self.proc.LogStep(6, steps_total, "removing old storage")
3785 a9e0c397 Iustin Pop
    for name, (dev, old_lvs, new_lvs) in iv_names.iteritems():
3786 cff90b79 Iustin Pop
      info("remove logical volumes for %s" % name)
3787 a9e0c397 Iustin Pop
      for lv in old_lvs:
3788 a9e0c397 Iustin Pop
        cfg.SetDiskID(lv, tgt_node)
3789 a9e0c397 Iustin Pop
        if not rpc.call_blockdev_remove(tgt_node, lv):
3790 79caa9ed Guido Trotter
          warning("Can't remove old LV", hint="manually remove unused LVs")
3791 a9e0c397 Iustin Pop
          continue
3792 a9e0c397 Iustin Pop
3793 a9e0c397 Iustin Pop
  def _ExecD8Secondary(self, feedback_fn):
3794 a9e0c397 Iustin Pop
    """Replace the secondary node for drbd8.
3795 a9e0c397 Iustin Pop

3796 a9e0c397 Iustin Pop
    The algorithm for replace is quite complicated:
3797 a9e0c397 Iustin Pop
      - for all disks of the instance:
3798 a9e0c397 Iustin Pop
        - create new LVs on the new node with same names
3799 a9e0c397 Iustin Pop
        - shutdown the drbd device on the old secondary
3800 a9e0c397 Iustin Pop
        - disconnect the drbd network on the primary
3801 a9e0c397 Iustin Pop
        - create the drbd device on the new secondary
3802 a9e0c397 Iustin Pop
        - network attach the drbd on the primary, using an artifice:
3803 a9e0c397 Iustin Pop
          the drbd code for Attach() will connect to the network if it
3804 a9e0c397 Iustin Pop
          finds a device which is connected to the good local disks but
3805 a9e0c397 Iustin Pop
          not network enabled
3806 a9e0c397 Iustin Pop
      - wait for sync across all devices
3807 a9e0c397 Iustin Pop
      - remove all disks from the old secondary
3808 a9e0c397 Iustin Pop

3809 a9e0c397 Iustin Pop
    Failures are not very well handled.
3810 0834c866 Iustin Pop

3811 a9e0c397 Iustin Pop
    """
3812 0834c866 Iustin Pop
    steps_total = 6
3813 5bfac263 Iustin Pop
    warning, info = (self.proc.LogWarning, self.proc.LogInfo)
3814 a9e0c397 Iustin Pop
    instance = self.instance
3815 a9e0c397 Iustin Pop
    iv_names = {}
3816 a9e0c397 Iustin Pop
    vgname = self.cfg.GetVGName()
3817 a9e0c397 Iustin Pop
    # start of work
3818 a9e0c397 Iustin Pop
    cfg = self.cfg
3819 a9e0c397 Iustin Pop
    old_node = self.tgt_node
3820 a9e0c397 Iustin Pop
    new_node = self.new_node
3821 a9e0c397 Iustin Pop
    pri_node = instance.primary_node
3822 0834c866 Iustin Pop
3823 0834c866 Iustin Pop
    # Step: check device activation
3824 5bfac263 Iustin Pop
    self.proc.LogStep(1, steps_total, "check device existence")
3825 0834c866 Iustin Pop
    info("checking volume groups")
3826 0834c866 Iustin Pop
    my_vg = cfg.GetVGName()
3827 0834c866 Iustin Pop
    results = rpc.call_vg_list([pri_node, new_node])
3828 0834c866 Iustin Pop
    if not results:
3829 0834c866 Iustin Pop
      raise errors.OpExecError("Can't list volume groups on the nodes")
3830 0834c866 Iustin Pop
    for node in pri_node, new_node:
3831 0834c866 Iustin Pop
      res = results.get(node, False)
3832 0834c866 Iustin Pop
      if not res or my_vg not in res:
3833 0834c866 Iustin Pop
        raise errors.OpExecError("Volume group '%s' not found on %s" %
3834 0834c866 Iustin Pop
                                 (my_vg, node))
3835 0834c866 Iustin Pop
    for dev in instance.disks:
3836 0834c866 Iustin Pop
      if not dev.iv_name in self.op.disks:
3837 0834c866 Iustin Pop
        continue
3838 0834c866 Iustin Pop
      info("checking %s on %s" % (dev.iv_name, pri_node))
3839 0834c866 Iustin Pop
      cfg.SetDiskID(dev, pri_node)
3840 0834c866 Iustin Pop
      if not rpc.call_blockdev_find(pri_node, dev):
3841 0834c866 Iustin Pop
        raise errors.OpExecError("Can't find device %s on node %s" %
3842 0834c866 Iustin Pop
                                 (dev.iv_name, pri_node))
3843 0834c866 Iustin Pop
3844 0834c866 Iustin Pop
    # Step: check other node consistency
3845 5bfac263 Iustin Pop
    self.proc.LogStep(2, steps_total, "check peer consistency")
3846 0834c866 Iustin Pop
    for dev in instance.disks:
3847 0834c866 Iustin Pop
      if not dev.iv_name in self.op.disks:
3848 0834c866 Iustin Pop
        continue
3849 0834c866 Iustin Pop
      info("checking %s consistency on %s" % (dev.iv_name, pri_node))
3850 0834c866 Iustin Pop
      if not _CheckDiskConsistency(self.cfg, dev, pri_node, True, ldisk=True):
3851 0834c866 Iustin Pop
        raise errors.OpExecError("Primary node (%s) has degraded storage,"
3852 0834c866 Iustin Pop
                                 " unsafe to replace the secondary" %
3853 0834c866 Iustin Pop
                                 pri_node)
3854 0834c866 Iustin Pop
3855 0834c866 Iustin Pop
    # Step: create new storage
3856 5bfac263 Iustin Pop
    self.proc.LogStep(3, steps_total, "allocate new storage")
3857 a9e0c397 Iustin Pop
    for dev in instance.disks:
3858 a9e0c397 Iustin Pop
      size = dev.size
3859 0834c866 Iustin Pop
      info("adding new local storage on %s for %s" % (new_node, dev.iv_name))
3860 a9e0c397 Iustin Pop
      # since we *always* want to create this LV, we use the
3861 a9e0c397 Iustin Pop
      # _Create...OnPrimary (which forces the creation), even if we
3862 a9e0c397 Iustin Pop
      # are talking about the secondary node
3863 a9e0c397 Iustin Pop
      for new_lv in dev.children:
3864 3f78eef2 Iustin Pop
        if not _CreateBlockDevOnPrimary(cfg, new_node, instance, new_lv,
3865 a9e0c397 Iustin Pop
                                        _GetInstanceInfoText(instance)):
3866 a9e0c397 Iustin Pop
          raise errors.OpExecError("Failed to create new LV named '%s' on"
3867 a9e0c397 Iustin Pop
                                   " node '%s'" %
3868 a9e0c397 Iustin Pop
                                   (new_lv.logical_id[1], new_node))
3869 a9e0c397 Iustin Pop
3870 0834c866 Iustin Pop
      iv_names[dev.iv_name] = (dev, dev.children)
3871 0834c866 Iustin Pop
3872 5bfac263 Iustin Pop
    self.proc.LogStep(4, steps_total, "changing drbd configuration")
3873 0834c866 Iustin Pop
    for dev in instance.disks:
3874 0834c866 Iustin Pop
      size = dev.size
3875 0834c866 Iustin Pop
      info("activating a new drbd on %s for %s" % (new_node, dev.iv_name))
3876 a9e0c397 Iustin Pop
      # create new devices on new_node
3877 a9e0c397 Iustin Pop
      new_drbd = objects.Disk(dev_type=constants.LD_DRBD8,
3878 a9e0c397 Iustin Pop
                              logical_id=(pri_node, new_node,
3879 a9e0c397 Iustin Pop
                                          dev.logical_id[2]),
3880 a9e0c397 Iustin Pop
                              children=dev.children)
3881 3f78eef2 Iustin Pop
      if not _CreateBlockDevOnSecondary(cfg, new_node, instance,
3882 3f78eef2 Iustin Pop
                                        new_drbd, False,
3883 a9e0c397 Iustin Pop
                                      _GetInstanceInfoText(instance)):
3884 a9e0c397 Iustin Pop
        raise errors.OpExecError("Failed to create new DRBD on"
3885 a9e0c397 Iustin Pop
                                 " node '%s'" % new_node)
3886 a9e0c397 Iustin Pop
3887 0834c866 Iustin Pop
    for dev in instance.disks:
3888 a9e0c397 Iustin Pop
      # we have new devices, shutdown the drbd on the old secondary
3889 0834c866 Iustin Pop
      info("shutting down drbd for %s on old node" % dev.iv_name)
3890 a9e0c397 Iustin Pop
      cfg.SetDiskID(dev, old_node)
3891 a9e0c397 Iustin Pop
      if not rpc.call_blockdev_shutdown(old_node, dev):
3892 0834c866 Iustin Pop
        warning("Failed to shutdown drbd for %s on old node" % dev.iv_name,
3893 79caa9ed Guido Trotter
                hint="Please cleanup this device manually as soon as possible")
3894 a9e0c397 Iustin Pop
3895 642445d9 Iustin Pop
    info("detaching primary drbds from the network (=> standalone)")
3896 642445d9 Iustin Pop
    done = 0
3897 642445d9 Iustin Pop
    for dev in instance.disks:
3898 a9e0c397 Iustin Pop
      cfg.SetDiskID(dev, pri_node)
3899 642445d9 Iustin Pop
      # set the physical (unique in bdev terms) id to None, meaning
3900 642445d9 Iustin Pop
      # detach from network
3901 642445d9 Iustin Pop
      dev.physical_id = (None,) * len(dev.physical_id)
3902 642445d9 Iustin Pop
      # and 'find' the device, which will 'fix' it to match the
3903 642445d9 Iustin Pop
      # standalone state
3904 642445d9 Iustin Pop
      if rpc.call_blockdev_find(pri_node, dev):
3905 642445d9 Iustin Pop
        done += 1
3906 642445d9 Iustin Pop
      else:
3907 642445d9 Iustin Pop
        warning("Failed to detach drbd %s from network, unusual case" %
3908 642445d9 Iustin Pop
                dev.iv_name)
3909 642445d9 Iustin Pop
3910 642445d9 Iustin Pop
    if not done:
3911 642445d9 Iustin Pop
      # no detaches succeeded (very unlikely)
3912 642445d9 Iustin Pop
      raise errors.OpExecError("Can't detach at least one DRBD from old node")
3913 642445d9 Iustin Pop
3914 642445d9 Iustin Pop
    # if we managed to detach at least one, we update all the disks of
3915 642445d9 Iustin Pop
    # the instance to point to the new secondary
3916 642445d9 Iustin Pop
    info("updating instance configuration")
3917 642445d9 Iustin Pop
    for dev in instance.disks:
3918 642445d9 Iustin Pop
      dev.logical_id = (pri_node, new_node) + dev.logical_id[2:]
3919 a9e0c397 Iustin Pop
      cfg.SetDiskID(dev, pri_node)
3920 642445d9 Iustin Pop
    cfg.Update(instance)
3921 a9e0c397 Iustin Pop
3922 642445d9 Iustin Pop
    # and now perform the drbd attach
3923 642445d9 Iustin Pop
    info("attaching primary drbds to new secondary (standalone => connected)")
3924 642445d9 Iustin Pop
    failures = []
3925 642445d9 Iustin Pop
    for dev in instance.disks:
3926 642445d9 Iustin Pop
      info("attaching primary drbd for %s to new secondary node" % dev.iv_name)
3927 642445d9 Iustin Pop
      # since the attach is smart, it's enough to 'find' the device,
3928 642445d9 Iustin Pop
      # it will automatically activate the network, if the physical_id
3929 642445d9 Iustin Pop
      # is correct
3930 642445d9 Iustin Pop
      cfg.SetDiskID(dev, pri_node)
3931 642445d9 Iustin Pop
      if not rpc.call_blockdev_find(pri_node, dev):
3932 642445d9 Iustin Pop
        warning("can't attach drbd %s to new secondary!" % dev.iv_name,
3933 642445d9 Iustin Pop
                "please do a gnt-instance info to see the status of disks")
3934 a9e0c397 Iustin Pop
3935 a9e0c397 Iustin Pop
    # this can fail as the old devices are degraded and _WaitForSync
3936 a9e0c397 Iustin Pop
    # does a combined result over all disks, so we don't check its
3937 a9e0c397 Iustin Pop
    # return value
3938 5bfac263 Iustin Pop
    self.proc.LogStep(5, steps_total, "sync devices")
3939 5bfac263 Iustin Pop
    _WaitForSync(cfg, instance, self.proc, unlock=True)
3940 a9e0c397 Iustin Pop
3941 a9e0c397 Iustin Pop
    # so check manually all the devices
3942 a9e0c397 Iustin Pop
    for name, (dev, old_lvs) in iv_names.iteritems():
3943 a9e0c397 Iustin Pop
      cfg.SetDiskID(dev, pri_node)
3944 a9e0c397 Iustin Pop
      is_degr = rpc.call_blockdev_find(pri_node, dev)[5]
3945 a9e0c397 Iustin Pop
      if is_degr:
3946 a9e0c397 Iustin Pop
        raise errors.OpExecError("DRBD device %s is degraded!" % name)
3947 a9e0c397 Iustin Pop
3948 5bfac263 Iustin Pop
    self.proc.LogStep(6, steps_total, "removing old storage")
3949 a9e0c397 Iustin Pop
    for name, (dev, old_lvs) in iv_names.iteritems():
3950 0834c866 Iustin Pop
      info("remove logical volumes for %s" % name)
3951 a9e0c397 Iustin Pop
      for lv in old_lvs:
3952 a9e0c397 Iustin Pop
        cfg.SetDiskID(lv, old_node)
3953 a9e0c397 Iustin Pop
        if not rpc.call_blockdev_remove(old_node, lv):
3954 0834c866 Iustin Pop
          warning("Can't remove LV on old secondary",
3955 79caa9ed Guido Trotter
                  hint="Cleanup stale volumes by hand")
3956 a9e0c397 Iustin Pop
3957 a9e0c397 Iustin Pop
  def Exec(self, feedback_fn):
3958 a9e0c397 Iustin Pop
    """Execute disk replacement.
3959 a9e0c397 Iustin Pop

3960 a9e0c397 Iustin Pop
    This dispatches the disk replacement to the appropriate handler.
3961 a9e0c397 Iustin Pop

3962 a9e0c397 Iustin Pop
    """
3963 a9e0c397 Iustin Pop
    instance = self.instance
3964 22985314 Guido Trotter
3965 22985314 Guido Trotter
    # Activate the instance disks if we're replacing them on a down instance
3966 22985314 Guido Trotter
    if instance.status == "down":
3967 22985314 Guido Trotter
      op = opcodes.OpActivateInstanceDisks(instance_name=instance.name)
3968 22985314 Guido Trotter
      self.proc.ChainOpCode(op)
3969 22985314 Guido Trotter
3970 abdf0113 Iustin Pop
    if instance.disk_template == constants.DT_DRBD8:
3971 a9e0c397 Iustin Pop
      if self.op.remote_node is None:
3972 a9e0c397 Iustin Pop
        fn = self._ExecD8DiskOnly
3973 a9e0c397 Iustin Pop
      else:
3974 a9e0c397 Iustin Pop
        fn = self._ExecD8Secondary
3975 a9e0c397 Iustin Pop
    else:
3976 a9e0c397 Iustin Pop
      raise errors.ProgrammerError("Unhandled disk replacement case")
3977 22985314 Guido Trotter
3978 22985314 Guido Trotter
    ret = fn(feedback_fn)
3979 22985314 Guido Trotter
3980 22985314 Guido Trotter
    # Deactivate the instance disks if we're replacing them on a down instance
3981 22985314 Guido Trotter
    if instance.status == "down":
3982 22985314 Guido Trotter
      op = opcodes.OpDeactivateInstanceDisks(instance_name=instance.name)
3983 22985314 Guido Trotter
      self.proc.ChainOpCode(op)
3984 22985314 Guido Trotter
3985 22985314 Guido Trotter
    return ret
3986 a9e0c397 Iustin Pop
3987 a8083063 Iustin Pop
3988 8729e0d7 Iustin Pop
class LUGrowDisk(LogicalUnit):
3989 8729e0d7 Iustin Pop
  """Grow a disk of an instance.
3990 8729e0d7 Iustin Pop

3991 8729e0d7 Iustin Pop
  """
3992 8729e0d7 Iustin Pop
  HPATH = "disk-grow"
3993 8729e0d7 Iustin Pop
  HTYPE = constants.HTYPE_INSTANCE
3994 8729e0d7 Iustin Pop
  _OP_REQP = ["instance_name", "disk", "amount"]
3995 8729e0d7 Iustin Pop
3996 8729e0d7 Iustin Pop
  def BuildHooksEnv(self):
3997 8729e0d7 Iustin Pop
    """Build hooks env.
3998 8729e0d7 Iustin Pop

3999 8729e0d7 Iustin Pop
    This runs on the master, the primary and all the secondaries.
4000 8729e0d7 Iustin Pop

4001 8729e0d7 Iustin Pop
    """
4002 8729e0d7 Iustin Pop
    env = {
4003 8729e0d7 Iustin Pop
      "DISK": self.op.disk,
4004 8729e0d7 Iustin Pop
      "AMOUNT": self.op.amount,
4005 8729e0d7 Iustin Pop
      }
4006 8729e0d7 Iustin Pop
    env.update(_BuildInstanceHookEnvByObject(self.instance))
4007 8729e0d7 Iustin Pop
    nl = [
4008 8729e0d7 Iustin Pop
      self.sstore.GetMasterNode(),
4009 8729e0d7 Iustin Pop
      self.instance.primary_node,
4010 8729e0d7 Iustin Pop
      ]
4011 8729e0d7 Iustin Pop
    return env, nl, nl
4012 8729e0d7 Iustin Pop
4013 8729e0d7 Iustin Pop
  def CheckPrereq(self):
4014 8729e0d7 Iustin Pop
    """Check prerequisites.
4015 8729e0d7 Iustin Pop

4016 8729e0d7 Iustin Pop
    This checks that the instance is in the cluster.
4017 8729e0d7 Iustin Pop

4018 8729e0d7 Iustin Pop
    """
4019 8729e0d7 Iustin Pop
    instance = self.cfg.GetInstanceInfo(
4020 8729e0d7 Iustin Pop
      self.cfg.ExpandInstanceName(self.op.instance_name))
4021 8729e0d7 Iustin Pop
    if instance is None:
4022 8729e0d7 Iustin Pop
      raise errors.OpPrereqError("Instance '%s' not known" %
4023 8729e0d7 Iustin Pop
                                 self.op.instance_name)
4024 8729e0d7 Iustin Pop
    self.instance = instance
4025 8729e0d7 Iustin Pop
    self.op.instance_name = instance.name
4026 8729e0d7 Iustin Pop
4027 8729e0d7 Iustin Pop
    if instance.disk_template not in (constants.DT_PLAIN, constants.DT_DRBD8):
4028 8729e0d7 Iustin Pop
      raise errors.OpPrereqError("Instance's disk layout does not support"
4029 8729e0d7 Iustin Pop
                                 " growing.")
4030 8729e0d7 Iustin Pop
4031 8729e0d7 Iustin Pop
    if instance.FindDisk(self.op.disk) is None:
4032 8729e0d7 Iustin Pop
      raise errors.OpPrereqError("Disk '%s' not found for instance '%s'" %
4033 c7cdfc90 Iustin Pop
                                 (self.op.disk, instance.name))
4034 8729e0d7 Iustin Pop
4035 8729e0d7 Iustin Pop
    nodenames = [instance.primary_node] + list(instance.secondary_nodes)
4036 8729e0d7 Iustin Pop
    nodeinfo = rpc.call_node_info(nodenames, self.cfg.GetVGName())
4037 8729e0d7 Iustin Pop
    for node in nodenames:
4038 8729e0d7 Iustin Pop
      info = nodeinfo.get(node, None)
4039 8729e0d7 Iustin Pop
      if not info:
4040 8729e0d7 Iustin Pop
        raise errors.OpPrereqError("Cannot get current information"
4041 8729e0d7 Iustin Pop
                                   " from node '%s'" % node)
4042 8729e0d7 Iustin Pop
      vg_free = info.get('vg_free', None)
4043 8729e0d7 Iustin Pop
      if not isinstance(vg_free, int):
4044 8729e0d7 Iustin Pop
        raise errors.OpPrereqError("Can't compute free disk space on"
4045 8729e0d7 Iustin Pop
                                   " node %s" % node)
4046 8729e0d7 Iustin Pop
      if self.op.amount > info['vg_free']:
4047 8729e0d7 Iustin Pop
        raise errors.OpPrereqError("Not enough disk space on target node %s:"
4048 8729e0d7 Iustin Pop
                                   " %d MiB available, %d MiB required" %
4049 8729e0d7 Iustin Pop
                                   (node, info['vg_free'], self.op.amount))
4050 8729e0d7 Iustin Pop
4051 8729e0d7 Iustin Pop
  def Exec(self, feedback_fn):
4052 8729e0d7 Iustin Pop
    """Execute disk grow.
4053 8729e0d7 Iustin Pop

4054 8729e0d7 Iustin Pop
    """
4055 8729e0d7 Iustin Pop
    instance = self.instance
4056 8729e0d7 Iustin Pop
    disk = instance.FindDisk(self.op.disk)
4057 8729e0d7 Iustin Pop
    for node in (instance.secondary_nodes + (instance.primary_node,)):
4058 8729e0d7 Iustin Pop
      self.cfg.SetDiskID(disk, node)
4059 8729e0d7 Iustin Pop
      result = rpc.call_blockdev_grow(node, disk, self.op.amount)
4060 8729e0d7 Iustin Pop
      if not result or not isinstance(result, tuple) or len(result) != 2:
4061 8729e0d7 Iustin Pop
        raise errors.OpExecError("grow request failed to node %s" % node)
4062 8729e0d7 Iustin Pop
      elif not result[0]:
4063 8729e0d7 Iustin Pop
        raise errors.OpExecError("grow request failed to node %s: %s" %
4064 8729e0d7 Iustin Pop
                                 (node, result[1]))
4065 8729e0d7 Iustin Pop
    disk.RecordGrow(self.op.amount)
4066 8729e0d7 Iustin Pop
    self.cfg.Update(instance)
4067 8729e0d7 Iustin Pop
    return
4068 8729e0d7 Iustin Pop
4069 8729e0d7 Iustin Pop
4070 a8083063 Iustin Pop
class LUQueryInstanceData(NoHooksLU):
  """Query runtime instance data.

  """
  _OP_REQP = ["instances"]

  def CheckPrereq(self):
    """Check prerequisites.

    This only checks the optional instance list against the existing names.

    """
    if not isinstance(self.op.instances, list):
      raise errors.OpPrereqError("Invalid argument type 'instances'")
    if not self.op.instances:
      # no names given: report on every instance in the configuration
      self.wanted_instances = [self.cfg.GetInstanceInfo(iname)
                               for iname in self.cfg.GetInstanceList()]
    else:
      wanted = []
      for iname in self.op.instances:
        inst = self.cfg.GetInstanceInfo(self.cfg.ExpandInstanceName(iname))
        if inst is None:
          raise errors.OpPrereqError("No such instance name '%s'" % iname)
        wanted.append(inst)
      self.wanted_instances = wanted

  def _ComputeDiskStatus(self, instance, snode, dev):
    """Compute the status dict of one block device, recursing into children.

    """
    self.cfg.SetDiskID(dev, instance.primary_node)
    dev_pstatus = rpc.call_blockdev_find(instance.primary_node, dev)
    if dev.dev_type in constants.LDS_DRBD:
      # for DRBD devices the secondary is the "other" end of the logical
      # id, overriding whatever the caller passed in
      if dev.logical_id[0] == instance.primary_node:
        snode = dev.logical_id[1]
      else:
        snode = dev.logical_id[0]

    if snode:
      self.cfg.SetDiskID(dev, snode)
      dev_sstatus = rpc.call_blockdev_find(snode, dev)
    else:
      dev_sstatus = None

    if dev.children:
      dev_children = [self._ComputeDiskStatus(instance, snode, child)
                      for child in dev.children]
    else:
      dev_children = []

    return {
      "iv_name": dev.iv_name,
      "dev_type": dev.dev_type,
      "logical_id": dev.logical_id,
      "physical_id": dev.physical_id,
      "pstatus": dev_pstatus,
      "sstatus": dev_sstatus,
      "children": dev_children,
      }

  def Exec(self, feedback_fn):
    """Gather and return data"""
    result = {}
    for instance in self.wanted_instances:
      remote_info = rpc.call_instance_info(instance.primary_node,
                                           instance.name)
      # the remote state is whatever the hypervisor reports; the config
      # state is the intended one from our configuration
      remote_state = ("up" if remote_info and "state" in remote_info
                      else "down")
      config_state = "down" if instance.status == "down" else "up"

      disks = [self._ComputeDiskStatus(instance, None, device)
               for device in instance.disks]

      idict = {
        "name": instance.name,
        "config_state": config_state,
        "run_state": remote_state,
        "pnode": instance.primary_node,
        "snodes": instance.secondary_nodes,
        "os": instance.os,
        "memory": instance.memory,
        "nics": [(nic.mac, nic.ip, nic.bridge) for nic in instance.nics],
        "disks": disks,
        "vcpus": instance.vcpus,
        }

      htkind = self.sstore.GetHypervisorType()
      if htkind == constants.HT_XEN_PVM30:
        # PVM instances additionally carry the kernel/initrd settings
        idict["kernel_path"] = instance.kernel_path
        idict["initrd_path"] = instance.initrd_path

      if htkind == constants.HT_XEN_HVM31:
        # HVM instances carry the full set of HVM tunables
        idict["hvm_boot_order"] = instance.hvm_boot_order
        idict["hvm_acpi"] = instance.hvm_acpi
        idict["hvm_pae"] = instance.hvm_pae
        idict["hvm_cdrom_image_path"] = instance.hvm_cdrom_image_path
        idict["hvm_nic_type"] = instance.hvm_nic_type
        idict["hvm_disk_type"] = instance.hvm_disk_type

      if htkind in constants.HTS_REQ_PORT:
        if instance.vnc_bind_address is None:
          vnc_bind_address = constants.VNC_DEFAULT_BIND_ADDRESS
        else:
          vnc_bind_address = instance.vnc_bind_address
        # the human-readable console endpoint depends on where VNC is bound
        if instance.network_port is None:
          vnc_console_port = None
        elif vnc_bind_address == constants.BIND_ADDRESS_GLOBAL:
          vnc_console_port = "%s:%s" % (instance.primary_node,
                                        instance.network_port)
        elif vnc_bind_address == constants.LOCALHOST_IP_ADDRESS:
          vnc_console_port = "%s:%s on node %s" % (vnc_bind_address,
                                                   instance.network_port,
                                                   instance.primary_node)
        else:
          vnc_console_port = "%s:%s" % (instance.vnc_bind_address,
                                        instance.network_port)
        idict["vnc_console_port"] = vnc_console_port
        idict["vnc_bind_address"] = vnc_bind_address
        idict["network_port"] = instance.network_port

      result[instance.name] = idict

    return result
4203 a8083063 Iustin Pop
4204 a8083063 Iustin Pop
4205 7767bbf5 Manuel Franceschini
class LUSetInstanceParams(LogicalUnit):
  """Modifies an instance's parameters.

  """
  HPATH = "instance-modify"
  HTYPE = constants.HTYPE_INSTANCE
  _OP_REQP = ["instance_name"]
  REQ_BGL = False

  def ExpandNames(self):
    """Expand the instance name and acquire its lock."""
    self._ExpandAndLockInstance()

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on the master, primary and secondaries.

    """
    args = dict()
    if self.mem:
      args['memory'] = self.mem
    if self.vcpus:
      args['vcpus'] = self.vcpus
    # only override the NIC data if at least one NIC field changes; the
    # unchanged fields are filled in from the first (and only exposed) NIC
    if self.do_ip or self.do_bridge or self.mac:
      if self.do_ip:
        ip = self.ip
      else:
        ip = self.instance.nics[0].ip
      if self.bridge:
        bridge = self.bridge
      else:
        bridge = self.instance.nics[0].bridge
      if self.mac:
        mac = self.mac
      else:
        mac = self.instance.nics[0].mac
      args['nics'] = [(ip, bridge, mac)]
    env = _BuildInstanceHookEnvByObject(self.instance, override=args)
    nl = [self.sstore.GetMasterNode(),
          self.instance.primary_node] + list(self.instance.secondary_nodes)
    return env, nl, nl

  def CheckPrereq(self):
    """Check prerequisites.

    This only checks the instance list against the existing names.

    """
    # FIXME: all the parameters could be checked before, in ExpandNames, or in
    # a separate CheckArguments function, if we implement one, so the operation
    # can be aborted without waiting for any lock, should it have an error...
    # every parameter is optional; missing ones default to None (= unchanged)
    self.mem = getattr(self.op, "mem", None)
    self.vcpus = getattr(self.op, "vcpus", None)
    self.ip = getattr(self.op, "ip", None)
    self.mac = getattr(self.op, "mac", None)
    self.bridge = getattr(self.op, "bridge", None)
    self.kernel_path = getattr(self.op, "kernel_path", None)
    self.initrd_path = getattr(self.op, "initrd_path", None)
    self.hvm_boot_order = getattr(self.op, "hvm_boot_order", None)
    self.hvm_acpi = getattr(self.op, "hvm_acpi", None)
    self.hvm_pae = getattr(self.op, "hvm_pae", None)
    self.hvm_nic_type = getattr(self.op, "hvm_nic_type", None)
    self.hvm_disk_type = getattr(self.op, "hvm_disk_type", None)
    self.hvm_cdrom_image_path = getattr(self.op, "hvm_cdrom_image_path", None)
    self.vnc_bind_address = getattr(self.op, "vnc_bind_address", None)
    self.force = getattr(self.op, "force", None)
    # note: self.force is deliberately not part of this list - it is a
    # modifier, not a change by itself
    all_parms = [self.mem, self.vcpus, self.ip, self.bridge, self.mac,
                 self.kernel_path, self.initrd_path, self.hvm_boot_order,
                 self.hvm_acpi, self.hvm_pae, self.hvm_cdrom_image_path,
                 self.vnc_bind_address, self.hvm_nic_type, self.hvm_disk_type]
    if all_parms.count(None) == len(all_parms):
      raise errors.OpPrereqError("No changes submitted")
    if self.mem is not None:
      try:
        self.mem = int(self.mem)
      except ValueError, err:
        raise errors.OpPrereqError("Invalid memory size: %s" % str(err))
    if self.vcpus is not None:
      try:
        self.vcpus = int(self.vcpus)
      except ValueError, err:
        raise errors.OpPrereqError("Invalid vcpus number: %s" % str(err))
    if self.ip is not None:
      self.do_ip = True
      # the literal string "none" means "remove the IP"
      if self.ip.lower() == "none":
        self.ip = None
      else:
        if not utils.IsValidIP(self.ip):
          raise errors.OpPrereqError("Invalid IP address '%s'." % self.ip)
    else:
      self.do_ip = False
    self.do_bridge = (self.bridge is not None)
    if self.mac is not None:
      # NOTE(review): uniqueness is checked before syntactic validity; an
      # invalid-but-unused MAC reaches IsValidMac and fails there
      if self.cfg.IsMacInUse(self.mac):
        raise errors.OpPrereqError('MAC address %s already in use in cluster' %
                                   self.mac)
      if not utils.IsValidMac(self.mac):
        raise errors.OpPrereqError('Invalid MAC address %s' % self.mac)

    if self.kernel_path is not None:
      self.do_kernel_path = True
      # an instance must always have a kernel; only "default" or a real
      # absolute path are acceptable
      if self.kernel_path == constants.VALUE_NONE:
        raise errors.OpPrereqError("Can't set instance to no kernel")

      if self.kernel_path != constants.VALUE_DEFAULT:
        if not os.path.isabs(self.kernel_path):
          raise errors.OpPrereqError("The kernel path must be an absolute"
                                    " filename")
    else:
      self.do_kernel_path = False

    if self.initrd_path is not None:
      self.do_initrd_path = True
      # unlike the kernel, the initrd may be removed ("none") or reset to
      # the cluster default
      if self.initrd_path not in (constants.VALUE_NONE,
                                  constants.VALUE_DEFAULT):
        if not os.path.isabs(self.initrd_path):
          raise errors.OpPrereqError("The initrd path must be an absolute"
                                    " filename")
    else:
      self.do_initrd_path = False

    # boot order verification
    if self.hvm_boot_order is not None:
      if self.hvm_boot_order != constants.VALUE_DEFAULT:
        # any combination of the four device letters is accepted
        if len(self.hvm_boot_order.strip("acdn")) != 0:
          raise errors.OpPrereqError("invalid boot order specified,"
                                     " must be one or more of [acdn]"
                                     " or 'default'")

    # hvm_cdrom_image_path verification
    if self.op.hvm_cdrom_image_path is not None:
      if not (os.path.isabs(self.op.hvm_cdrom_image_path) or
              self.op.hvm_cdrom_image_path.lower() == "none"):
        raise errors.OpPrereqError("The path to the HVM CDROM image must"
                                   " be an absolute path or None, not %s" %
                                   self.op.hvm_cdrom_image_path)
      # NOTE(review): this isfile() check runs on the node executing this
      # LU, not on the instance's primary node - confirm the image path is
      # expected to be visible here
      if not (os.path.isfile(self.op.hvm_cdrom_image_path) or
              self.op.hvm_cdrom_image_path.lower() == "none"):
        raise errors.OpPrereqError("The HVM CDROM image must either be a"
                                   " regular file or a symlink pointing to"
                                   " an existing regular file, not %s" %
                                   self.op.hvm_cdrom_image_path)

    # vnc_bind_address verification
    if self.op.vnc_bind_address is not None:
      if not utils.IsValidIP(self.op.vnc_bind_address):
        raise errors.OpPrereqError("given VNC bind address '%s' doesn't look"
                                   " like a valid IP address" %
                                   self.op.vnc_bind_address)

    instance = self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
    assert self.instance is not None, \
      "Cannot retrieve locked instance %s" % self.op.instance_name
    # memory checks below only produce warnings unless the shortfall is on
    # the primary node; --force skips them entirely
    self.warn = []
    if self.mem is not None and not self.force:
      pnode = self.instance.primary_node
      nodelist = [pnode]
      nodelist.extend(instance.secondary_nodes)
      instance_info = rpc.call_instance_info(pnode, instance.name)
      nodeinfo = rpc.call_node_info(nodelist, self.cfg.GetVGName())

      if pnode not in nodeinfo or not isinstance(nodeinfo[pnode], dict):
        # Assume the primary node is unreachable and go ahead
        self.warn.append("Can't get info from primary node %s" % pnode)
      else:
        if instance_info:
          current_mem = instance_info['memory']
        else:
          # Assume instance not running
          # (there is a slight race condition here, but it's not very probable,
          # and we have no other way to check)
          current_mem = 0
        # the memory the instance already uses is available for regrowing it
        miss_mem = self.mem - current_mem - nodeinfo[pnode]['memory_free']
        if miss_mem > 0:
          raise errors.OpPrereqError("This change will prevent the instance"
                                     " from starting, due to %d MB of memory"
                                     " missing on its primary node" % miss_mem)

      for node in instance.secondary_nodes:
        if node not in nodeinfo or not isinstance(nodeinfo[node], dict):
          self.warn.append("Can't get info from secondary node %s" % node)
        elif self.mem > nodeinfo[node]['memory_free']:
          self.warn.append("Not enough memory to failover instance to secondary"
                           " node %s" % node)

    # Xen HVM device type checks
    if self.sstore.GetHypervisorType() == constants.HT_XEN_HVM31:
      if self.op.hvm_nic_type is not None:
        if self.op.hvm_nic_type not in constants.HT_HVM_VALID_NIC_TYPES:
          raise errors.OpPrereqError("Invalid NIC type %s specified for Xen"
                                     " HVM  hypervisor" % self.op.hvm_nic_type)
      if self.op.hvm_disk_type is not None:
        if self.op.hvm_disk_type not in constants.HT_HVM_VALID_DISK_TYPES:
          raise errors.OpPrereqError("Invalid disk type %s specified for Xen"
                                     " HVM hypervisor" % self.op.hvm_disk_type)

    return

  def Exec(self, feedback_fn):
    """Modifies an instance.

    All parameters take effect only at the next restart of the instance.
    """
    # Process here the warnings from CheckPrereq, as we don't have a
    # feedback_fn there.
    for warn in self.warn:
      feedback_fn("WARNING: %s" % warn)

    # each applied change is returned as a (name, new_value) pair
    result = []
    instance = self.instance
    if self.mem:
      instance.memory = self.mem
      result.append(("mem", self.mem))
    if self.vcpus:
      instance.vcpus = self.vcpus
      result.append(("vcpus",  self.vcpus))
    if self.do_ip:
      instance.nics[0].ip = self.ip
      result.append(("ip", self.ip))
    if self.bridge:
      instance.nics[0].bridge = self.bridge
      result.append(("bridge", self.bridge))
    if self.mac:
      instance.nics[0].mac = self.mac
      result.append(("mac", self.mac))
    if self.do_kernel_path:
      instance.kernel_path = self.kernel_path
      result.append(("kernel_path", self.kernel_path))
    if self.do_initrd_path:
      instance.initrd_path = self.initrd_path
      result.append(("initrd_path", self.initrd_path))
    if self.hvm_boot_order:
      # "default" clears the per-instance override
      if self.hvm_boot_order == constants.VALUE_DEFAULT:
        instance.hvm_boot_order = None
      else:
        instance.hvm_boot_order = self.hvm_boot_order
      result.append(("hvm_boot_order", self.hvm_boot_order))
    # the boolean flags test "is not None" so that an explicit False is
    # still applied
    if self.hvm_acpi is not None:
      instance.hvm_acpi = self.hvm_acpi
      result.append(("hvm_acpi", self.hvm_acpi))
    if self.hvm_pae is not None:
      instance.hvm_pae = self.hvm_pae
      result.append(("hvm_pae", self.hvm_pae))
    if self.hvm_nic_type is not None:
      instance.hvm_nic_type = self.hvm_nic_type
      result.append(("hvm_nic_type", self.hvm_nic_type))
    if self.hvm_disk_type is not None:
      instance.hvm_disk_type = self.hvm_disk_type
      result.append(("hvm_disk_type", self.hvm_disk_type))
    if self.hvm_cdrom_image_path:
      # "none" removes the virtual CDROM
      if self.hvm_cdrom_image_path == constants.VALUE_NONE:
        instance.hvm_cdrom_image_path = None
      else:
        instance.hvm_cdrom_image_path = self.hvm_cdrom_image_path
      result.append(("hvm_cdrom_image_path", self.hvm_cdrom_image_path))
    if self.vnc_bind_address:
      instance.vnc_bind_address = self.vnc_bind_address
      result.append(("vnc_bind_address", self.vnc_bind_address))

    self.cfg.Update(instance)

    return result
4467 a8083063 Iustin Pop
4468 a8083063 Iustin Pop
4469 a8083063 Iustin Pop
class LUQueryExports(NoHooksLU):
  """Query the exports list

  """
  _OP_REQP = ['nodes']
  REQ_BGL = False

  def ExpandNames(self):
    """Compute the node locks needed for the query.

    Node locks are taken in shared mode; with no explicit node list the
    whole cluster is queried.

    """
    self.needed_locks = {}
    self.share_locks[locking.LEVEL_NODE] = 1
    if self.op.nodes:
      self.needed_locks[locking.LEVEL_NODE] = \
        _GetWantedNodes(self, self.op.nodes)
    else:
      self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET

  def CheckPrereq(self):
    """Check prerequisites.

    """
    # the nodes to query are exactly those we managed to lock
    self.nodes = self.acquired_locks[locking.LEVEL_NODE]

  def Exec(self, feedback_fn):
    """Compute the list of all the exported system images.

    Returns:
      a dictionary with the structure node->(export-list)
      where export-list is a list of the instances exported on
      that node.

    """
    return rpc.call_export_list(self.nodes)
4501 a8083063 Iustin Pop
4502 a8083063 Iustin Pop
4503 a8083063 Iustin Pop
class LUExportInstance(LogicalUnit):
  """Export an instance to an image in the cluster.

  """
  HPATH = "instance-export"
  HTYPE = constants.HTYPE_INSTANCE
  _OP_REQP = ["instance_name", "target_node", "shutdown"]

  def BuildHooksEnv(self):
    """Build hooks env.

    This will run on the master, primary node and target node.

    """
    env = {
      "EXPORT_NODE": self.op.target_node,
      "EXPORT_DO_SHUTDOWN": self.op.shutdown,
      }
    env.update(_BuildInstanceHookEnvByObject(self.instance))
    nl = [self.sstore.GetMasterNode(), self.instance.primary_node,
          self.op.target_node]
    return env, nl, nl

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance and node names are valid.

    """
    instance_name = self.cfg.ExpandInstanceName(self.op.instance_name)
    self.instance = self.cfg.GetInstanceInfo(instance_name)
    if self.instance is None:
      raise errors.OpPrereqError("Instance '%s' not found" %
                                 self.op.instance_name)

    # node verification
    dst_node_short = self.cfg.ExpandNodeName(self.op.target_node)
    self.dst_node = self.cfg.GetNodeInfo(dst_node_short)

    if self.dst_node is None:
      raise errors.OpPrereqError("Destination node '%s' is unknown." %
                                 self.op.target_node)
    # normalize to the expanded (full) node name
    self.op.target_node = self.dst_node.name

    # instance disk type verification
    for disk in self.instance.disks:
      if disk.dev_type == constants.LD_FILE:
        raise errors.OpPrereqError("Export not supported for instances with"
                                   " file-based disks")

  def Exec(self, feedback_fn):
    """Export an instance to an image in the cluster.

    The sequence is: optionally shut the instance down, snapshot its
    disks, restart it (in the finally clause, so a failed snapshot does
    not leave it down), then copy the snapshots to the target node,
    finalize the export there and prune older exports of the same
    instance elsewhere in the cluster.

    """
    instance = self.instance
    dst_node = self.dst_node
    src_node = instance.primary_node
    if self.op.shutdown:
      # shutdown the instance, but not the disks
      if not rpc.call_instance_shutdown(src_node, instance):
        raise errors.OpExecError("Could not shutdown instance %s on node %s" %
                                 (instance.name, src_node))

    vgname = self.cfg.GetVGName()

    snap_disks = []

    try:
      # only the "sda" disk is snapshotted; snapshot failures are logged
      # but do not abort the export
      for disk in instance.disks:
        if disk.iv_name == "sda":
          # new_dev_name will be a snapshot of an lvm leaf of the one we passed
          new_dev_name = rpc.call_blockdev_snapshot(src_node, disk)

          if not new_dev_name:
            logger.Error("could not snapshot block device %s on node %s" %
                         (disk.logical_id[1], src_node))
          else:
            new_dev = objects.Disk(dev_type=constants.LD_LV, size=disk.size,
                                      logical_id=(vgname, new_dev_name),
                                      physical_id=(vgname, new_dev_name),
                                      iv_name=disk.iv_name)
            snap_disks.append(new_dev)

    finally:
      # restart the instance only if we shut it down ourselves and the
      # configuration says it should be running
      if self.op.shutdown and instance.status == "up":
        if not rpc.call_instance_start(src_node, instance, None):
          _ShutdownInstanceDisks(instance, self.cfg)
          raise errors.OpExecError("Could not start instance")

    # TODO: check for size

    # copy each snapshot to the target node, then drop the snapshot on
    # the source node regardless of the copy outcome
    for dev in snap_disks:
      if not rpc.call_snapshot_export(src_node, dev, dst_node.name, instance):
        logger.Error("could not export block device %s from node %s to node %s"
                     % (dev.logical_id[1], src_node, dst_node.name))
      if not rpc.call_blockdev_remove(src_node, dev):
        logger.Error("could not remove snapshot block device %s from node %s" %
                     (dev.logical_id[1], src_node))

    if not rpc.call_finalize_export(dst_node.name, instance, snap_disks):
      logger.Error("could not finalize export for instance %s on node %s" %
                   (instance.name, dst_node.name))

    nodelist = self.cfg.GetNodeList()
    nodelist.remove(dst_node.name)

    # on one-node clusters nodelist will be empty after the removal
    # if we proceed the backup would be removed because OpQueryExports
    # substitutes an empty list with the full cluster node list.
    if nodelist:
      exportlist = rpc.call_export_list(nodelist)
      for node in exportlist:
        if instance.name in exportlist[node]:
          if not rpc.call_export_remove(node, instance.name):
            logger.Error("could not remove older export for instance %s"
                         " on node %s" % (instance.name, node))
4619 5c947f38 Iustin Pop
4620 5c947f38 Iustin Pop
4621 9ac99fda Guido Trotter
class LURemoveExport(NoHooksLU):
  """Remove exports related to the named instance.

  """
  _OP_REQP = ["instance_name"]

  def CheckPrereq(self):
    """Check prerequisites.
    """
    pass

  def Exec(self, feedback_fn):
    """Remove any export.

    """
    instance_name = self.cfg.ExpandInstanceName(self.op.instance_name)
    # If the instance was not found we'll try with the name that was passed in.
    # This will only work if it was an FQDN, though.
    fqdn_warn = not instance_name
    if fqdn_warn:
      instance_name = self.op.instance_name

    found = False
    exportlist = rpc.call_export_list(self.cfg.GetNodeList())
    for node in exportlist:
      if instance_name not in exportlist[node]:
        continue
      found = True
      if not rpc.call_export_remove(node, instance_name):
        logger.Error("could not remove export for instance %s"
                     " on node %s" % (instance_name, node))

    if fqdn_warn and not found:
      feedback_fn("Export not found. If trying to remove an export belonging"
                  " to a deleted instance please use its Fully Qualified"
                  " Domain Name.")
4657 9ac99fda Guido Trotter
4658 9ac99fda Guido Trotter
4659 5c947f38 Iustin Pop
class TagsLU(NoHooksLU):
  """Generic tags LU.

  This is an abstract class which is the parent of all the other tags LUs.

  """
  def CheckPrereq(self):
    """Check prerequisites.

    Resolves the target object (cluster, node or instance) depending on
    self.op.kind and stores it in self.target.

    """
    kind = self.op.kind
    if kind == constants.TAG_CLUSTER:
      self.target = self.cfg.GetClusterInfo()
      return
    if kind == constants.TAG_NODE:
      name = self.cfg.ExpandNodeName(self.op.name)
      if name is None:
        raise errors.OpPrereqError("Invalid node name (%s)" %
                                   (self.op.name,))
      self.op.name = name
      self.target = self.cfg.GetNodeInfo(name)
    elif kind == constants.TAG_INSTANCE:
      name = self.cfg.ExpandInstanceName(self.op.name)
      if name is None:
        raise errors.OpPrereqError("Invalid instance name (%s)" %
                                   (self.op.name,))
      self.op.name = name
      self.target = self.cfg.GetInstanceInfo(name)
    else:
      raise errors.OpPrereqError("Wrong tag type requested (%s)" %
                                 str(self.op.kind))
4688 5c947f38 Iustin Pop
4689 5c947f38 Iustin Pop
4690 5c947f38 Iustin Pop
class LUGetTags(TagsLU):
  """Returns the tags of a given object.

  """
  _OP_REQP = ["kind", "name"]

  def Exec(self, feedback_fn):
    """Returns the tag list.

    """
    tags = self.target.GetTags()
    return list(tags)
4701 5c947f38 Iustin Pop
4702 5c947f38 Iustin Pop
4703 73415719 Iustin Pop
class LUSearchTags(NoHooksLU):
4704 73415719 Iustin Pop
  """Searches the tags for a given pattern.
4705 73415719 Iustin Pop

4706 73415719 Iustin Pop
  """
4707 73415719 Iustin Pop
  _OP_REQP = ["pattern"]
4708 73415719 Iustin Pop
4709 73415719 Iustin Pop
  def CheckPrereq(self):
4710 73415719 Iustin Pop
    """Check prerequisites.
4711 73415719 Iustin Pop

4712 73415719 Iustin Pop
    This checks the pattern passed for validity by compiling it.
4713 73415719 Iustin Pop

4714 73415719 Iustin Pop
    """
4715 73415719 Iustin Pop
    try:
4716 73415719 Iustin Pop
      self.re = re.compile(self.op.pattern)
4717 73415719 Iustin Pop
    except re.error, err:
4718 73415719 Iustin Pop
      raise errors.OpPrereqError("Invalid search pattern '%s': %s" %
4719 73415719 Iustin Pop
                                 (self.op.pattern, err))
4720 73415719 Iustin Pop
4721 73415719 Iustin Pop
  def Exec(self, feedback_fn):
4722 73415719 Iustin Pop
    """Returns the tag list.
4723 73415719 Iustin Pop

4724 73415719 Iustin Pop
    """
4725 73415719 Iustin Pop
    cfg = self.cfg
4726 73415719 Iustin Pop
    tgts = [("/cluster", cfg.GetClusterInfo())]
4727 73415719 Iustin Pop
    ilist = [cfg.GetInstanceInfo(name) for name in cfg.GetInstanceList()]
4728 73415719 Iustin Pop
    tgts.extend([("/instances/%s" % i.name, i) for i in ilist])
4729 73415719 Iustin Pop
    nlist = [cfg.GetNodeInfo(name) for name in cfg.GetNodeList()]
4730 73415719 Iustin Pop
    tgts.extend([("/nodes/%s" % n.name, n) for n in nlist])
4731 73415719 Iustin Pop
    results = []
4732 73415719 Iustin Pop
    for path, target in tgts:
4733 73415719 Iustin Pop
      for tag in target.GetTags():
4734 73415719 Iustin Pop
        if self.re.search(tag):
4735 73415719 Iustin Pop
          results.append((path, tag))
4736 73415719 Iustin Pop
    return results
4737 73415719 Iustin Pop
4738 73415719 Iustin Pop
4739 f27302fa Iustin Pop
class LUAddTags(TagsLU):
4740 5c947f38 Iustin Pop
  """Sets a tag on a given object.
4741 5c947f38 Iustin Pop

4742 5c947f38 Iustin Pop
  """
4743 f27302fa Iustin Pop
  _OP_REQP = ["kind", "name", "tags"]
4744 5c947f38 Iustin Pop
4745 5c947f38 Iustin Pop
  def CheckPrereq(self):
4746 5c947f38 Iustin Pop
    """Check prerequisites.
4747 5c947f38 Iustin Pop

4748 5c947f38 Iustin Pop
    This checks the type and length of the tag name and value.
4749 5c947f38 Iustin Pop

4750 5c947f38 Iustin Pop
    """
4751 5c947f38 Iustin Pop
    TagsLU.CheckPrereq(self)
4752 f27302fa Iustin Pop
    for tag in self.op.tags:
4753 f27302fa Iustin Pop
      objects.TaggableObject.ValidateTag(tag)
4754 5c947f38 Iustin Pop
4755 5c947f38 Iustin Pop
  def Exec(self, feedback_fn):
4756 5c947f38 Iustin Pop
    """Sets the tag.
4757 5c947f38 Iustin Pop

4758 5c947f38 Iustin Pop
    """
4759 5c947f38 Iustin Pop
    try:
4760 f27302fa Iustin Pop
      for tag in self.op.tags:
4761 f27302fa Iustin Pop
        self.target.AddTag(tag)
4762 5c947f38 Iustin Pop
    except errors.TagError, err:
4763 3ecf6786 Iustin Pop
      raise errors.OpExecError("Error while setting tag: %s" % str(err))
4764 5c947f38 Iustin Pop
    try:
4765 5c947f38 Iustin Pop
      self.cfg.Update(self.target)
4766 5c947f38 Iustin Pop
    except errors.ConfigurationError:
4767 3ecf6786 Iustin Pop
      raise errors.OpRetryError("There has been a modification to the"
4768 3ecf6786 Iustin Pop
                                " config file and the operation has been"
4769 3ecf6786 Iustin Pop
                                " aborted. Please retry.")
4770 5c947f38 Iustin Pop
4771 5c947f38 Iustin Pop
4772 f27302fa Iustin Pop
class LUDelTags(TagsLU):
  """Delete a list of tags from a given object.

  """
  _OP_REQP = ["kind", "name", "tags"]

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that we have the given tag.

    """
    TagsLU.CheckPrereq(self)
    for tag in self.op.tags:
      objects.TaggableObject.ValidateTag(tag)
    del_tags = frozenset(self.op.tags)
    cur_tags = self.target.GetTags()
    if not del_tags <= cur_tags:
      diff_tags = del_tags - cur_tags
      diff_names = sorted("'%s'" % tag for tag in diff_tags)
      raise errors.OpPrereqError("Tag(s) %s not found" %
                                 (",".join(diff_names)))

  def Exec(self, feedback_fn):
    """Remove the tag from the object.

    """
    target = self.target
    for tag in self.op.tags:
      target.RemoveTag(tag)
    try:
      self.cfg.Update(target)
    except errors.ConfigurationError:
      raise errors.OpRetryError("There has been a modification to the"
                                " config file and the operation has been"
                                " aborted. Please retry.")
4808 06009e27 Iustin Pop
4809 0eed6e61 Guido Trotter
4810 06009e27 Iustin Pop
class LUTestDelay(NoHooksLU):
  """Sleep for a specified amount of time.

  This LU sleeps on the master and/or nodes for a specified amount of
  time.

  """
  _OP_REQP = ["duration", "on_master", "on_nodes"]
  REQ_BGL = False

  def ExpandNames(self):
    """Expand names and set required locks.

    This expands the node list, if any.

    """
    self.needed_locks = {}
    if not self.op.on_nodes:
      return
    # _GetWantedNodes can be used here, but is not always appropriate to use
    # this way in ExpandNames. Check LogicalUnit.ExpandNames docstring for
    # more information.
    self.op.on_nodes = _GetWantedNodes(self, self.op.on_nodes)
    self.needed_locks[locking.LEVEL_NODE] = self.op.on_nodes

  def CheckPrereq(self):
    """Check prerequisites.

    """

  def Exec(self, feedback_fn):
    """Do the actual sleep.

    """
    if self.op.on_master and not utils.TestDelay(self.op.duration):
      raise errors.OpExecError("Error during master delay test")
    if self.op.on_nodes:
      result = rpc.call_test_delay(self.op.on_nodes, self.op.duration)
      if not result:
        raise errors.OpExecError("Complete failure from rpc call")
      for node, node_result in result.items():
        if not node_result:
          raise errors.OpExecError("Failure during rpc call to node %s,"
                                   " result: %s" % (node, node_result))
4854 d61df03e Iustin Pop
4855 d61df03e Iustin Pop
4856 d1c2dd75 Iustin Pop
class IAllocator(object):
4857 d1c2dd75 Iustin Pop
  """IAllocator framework.
4858 d61df03e Iustin Pop

4859 d1c2dd75 Iustin Pop
  An IAllocator instance has three sets of attributes:
4860 d1c2dd75 Iustin Pop
    - cfg/sstore that are needed to query the cluster
4861 d1c2dd75 Iustin Pop
    - input data (all members of the _KEYS class attribute are required)
4862 d1c2dd75 Iustin Pop
    - four buffer attributes (in|out_data|text), that represent the
4863 d1c2dd75 Iustin Pop
      input (to the external script) in text and data structure format,
4864 d1c2dd75 Iustin Pop
      and the output from it, again in two formats
4865 d1c2dd75 Iustin Pop
    - the result variables from the script (success, info, nodes) for
4866 d1c2dd75 Iustin Pop
      easy usage
4867 d61df03e Iustin Pop

4868 d61df03e Iustin Pop
  """
4869 29859cb7 Iustin Pop
  _ALLO_KEYS = [
4870 d1c2dd75 Iustin Pop
    "mem_size", "disks", "disk_template",
4871 d1c2dd75 Iustin Pop
    "os", "tags", "nics", "vcpus",
4872 d1c2dd75 Iustin Pop
    ]
4873 29859cb7 Iustin Pop
  _RELO_KEYS = [
4874 29859cb7 Iustin Pop
    "relocate_from",
4875 29859cb7 Iustin Pop
    ]
4876 d1c2dd75 Iustin Pop
4877 29859cb7 Iustin Pop
  def __init__(self, cfg, sstore, mode, name, **kwargs):
4878 d1c2dd75 Iustin Pop
    self.cfg = cfg
4879 d1c2dd75 Iustin Pop
    self.sstore = sstore
4880 d1c2dd75 Iustin Pop
    # init buffer variables
4881 d1c2dd75 Iustin Pop
    self.in_text = self.out_text = self.in_data = self.out_data = None
4882 d1c2dd75 Iustin Pop
    # init all input fields so that pylint is happy
4883 29859cb7 Iustin Pop
    self.mode = mode
4884 29859cb7 Iustin Pop
    self.name = name
4885 d1c2dd75 Iustin Pop
    self.mem_size = self.disks = self.disk_template = None
4886 d1c2dd75 Iustin Pop
    self.os = self.tags = self.nics = self.vcpus = None
4887 29859cb7 Iustin Pop
    self.relocate_from = None
4888 27579978 Iustin Pop
    # computed fields
4889 27579978 Iustin Pop
    self.required_nodes = None
4890 d1c2dd75 Iustin Pop
    # init result fields
4891 d1c2dd75 Iustin Pop
    self.success = self.info = self.nodes = None
4892 29859cb7 Iustin Pop
    if self.mode == constants.IALLOCATOR_MODE_ALLOC:
4893 29859cb7 Iustin Pop
      keyset = self._ALLO_KEYS
4894 29859cb7 Iustin Pop
    elif self.mode == constants.IALLOCATOR_MODE_RELOC:
4895 29859cb7 Iustin Pop
      keyset = self._RELO_KEYS
4896 29859cb7 Iustin Pop
    else:
4897 29859cb7 Iustin Pop
      raise errors.ProgrammerError("Unknown mode '%s' passed to the"
4898 29859cb7 Iustin Pop
                                   " IAllocator" % self.mode)
4899 d1c2dd75 Iustin Pop
    for key in kwargs:
4900 29859cb7 Iustin Pop
      if key not in keyset:
4901 d1c2dd75 Iustin Pop
        raise errors.ProgrammerError("Invalid input parameter '%s' to"
4902 d1c2dd75 Iustin Pop
                                     " IAllocator" % key)
4903 d1c2dd75 Iustin Pop
      setattr(self, key, kwargs[key])
4904 29859cb7 Iustin Pop
    for key in keyset:
4905 d1c2dd75 Iustin Pop
      if key not in kwargs:
4906 d1c2dd75 Iustin Pop
        raise errors.ProgrammerError("Missing input parameter '%s' to"
4907 d1c2dd75 Iustin Pop
                                     " IAllocator" % key)
4908 d1c2dd75 Iustin Pop
    self._BuildInputData()
4909 d1c2dd75 Iustin Pop
4910 d1c2dd75 Iustin Pop
  def _ComputeClusterData(self):
4911 d1c2dd75 Iustin Pop
    """Compute the generic allocator input data.
4912 d1c2dd75 Iustin Pop

4913 d1c2dd75 Iustin Pop
    This is the data that is independent of the actual operation.
4914 d1c2dd75 Iustin Pop

4915 d1c2dd75 Iustin Pop
    """
4916 d1c2dd75 Iustin Pop
    cfg = self.cfg
4917 d1c2dd75 Iustin Pop
    # cluster data
4918 d1c2dd75 Iustin Pop
    data = {
4919 d1c2dd75 Iustin Pop
      "version": 1,
4920 d1c2dd75 Iustin Pop
      "cluster_name": self.sstore.GetClusterName(),
4921 d1c2dd75 Iustin Pop
      "cluster_tags": list(cfg.GetClusterInfo().GetTags()),
4922 6286519f Iustin Pop
      "hypervisor_type": self.sstore.GetHypervisorType(),
4923 d1c2dd75 Iustin Pop
      # we don't have job IDs
4924 d61df03e Iustin Pop
      }
4925 d61df03e Iustin Pop
4926 6286519f Iustin Pop
    i_list = [cfg.GetInstanceInfo(iname) for iname in cfg.GetInstanceList()]
4927 6286519f Iustin Pop
4928 d1c2dd75 Iustin Pop
    # node data
4929 d1c2dd75 Iustin Pop
    node_results = {}
4930 d1c2dd75 Iustin Pop
    node_list = cfg.GetNodeList()
4931 d1c2dd75 Iustin Pop
    node_data = rpc.call_node_info(node_list, cfg.GetVGName())
4932 d1c2dd75 Iustin Pop
    for nname in node_list:
4933 d1c2dd75 Iustin Pop
      ninfo = cfg.GetNodeInfo(nname)
4934 d1c2dd75 Iustin Pop
      if nname not in node_data or not isinstance(node_data[nname], dict):
4935 d1c2dd75 Iustin Pop
        raise errors.OpExecError("Can't get data for node %s" % nname)
4936 d1c2dd75 Iustin Pop
      remote_info = node_data[nname]
4937 b2662e7f Iustin Pop
      for attr in ['memory_total', 'memory_free', 'memory_dom0',
4938 4337cf1b Iustin Pop
                   'vg_size', 'vg_free', 'cpu_total']:
4939 d1c2dd75 Iustin Pop
        if attr not in remote_info:
4940 d1c2dd75 Iustin Pop
          raise errors.OpExecError("Node '%s' didn't return attribute '%s'" %
4941 d1c2dd75 Iustin Pop
                                   (nname, attr))
4942 d1c2dd75 Iustin Pop
        try:
4943 b2662e7f Iustin Pop
          remote_info[attr] = int(remote_info[attr])
4944 d1c2dd75 Iustin Pop
        except ValueError, err:
4945 d1c2dd75 Iustin Pop
          raise errors.OpExecError("Node '%s' returned invalid value for '%s':"
4946 d1c2dd75 Iustin Pop
                                   " %s" % (nname, attr, str(err)))
4947 6286519f Iustin Pop
      # compute memory used by primary instances
4948 6286519f Iustin Pop
      i_p_mem = i_p_up_mem = 0
4949 6286519f Iustin Pop
      for iinfo in i_list:
4950 6286519f Iustin Pop
        if iinfo.primary_node == nname:
4951 6286519f Iustin Pop
          i_p_mem += iinfo.memory
4952 6286519f Iustin Pop
          if iinfo.status == "up":
4953 6286519f Iustin Pop
            i_p_up_mem += iinfo.memory
4954 6286519f Iustin Pop
4955 b2662e7f Iustin Pop
      # compute memory used by instances
4956 d1c2dd75 Iustin Pop
      pnr = {
4957 d1c2dd75 Iustin Pop
        "tags": list(ninfo.GetTags()),
4958 b2662e7f Iustin Pop
        "total_memory": remote_info['memory_total'],
4959 b2662e7f Iustin Pop
        "reserved_memory": remote_info['memory_dom0'],
4960 b2662e7f Iustin Pop
        "free_memory": remote_info['memory_free'],
4961 6286519f Iustin Pop
        "i_pri_memory": i_p_mem,
4962 6286519f Iustin Pop
        "i_pri_up_memory": i_p_up_mem,
4963 b2662e7f Iustin Pop
        "total_disk": remote_info['vg_size'],
4964 b2662e7f Iustin Pop
        "free_disk": remote_info['vg_free'],
4965 d1c2dd75 Iustin Pop
        "primary_ip": ninfo.primary_ip,
4966 d1c2dd75 Iustin Pop
        "secondary_ip": ninfo.secondary_ip,
4967 4337cf1b Iustin Pop
        "total_cpus": remote_info['cpu_total'],
4968 d1c2dd75 Iustin Pop
        }
4969 d1c2dd75 Iustin Pop
      node_results[nname] = pnr
4970 d1c2dd75 Iustin Pop
    data["nodes"] = node_results
4971 d1c2dd75 Iustin Pop
4972 d1c2dd75 Iustin Pop
    # instance data
4973 d1c2dd75 Iustin Pop
    instance_data = {}
4974 6286519f Iustin Pop
    for iinfo in i_list:
4975 d1c2dd75 Iustin Pop
      nic_data = [{"mac": n.mac, "ip": n.ip, "bridge": n.bridge}
4976 d1c2dd75 Iustin Pop
                  for n in iinfo.nics]
4977 d1c2dd75 Iustin Pop
      pir = {
4978 d1c2dd75 Iustin Pop
        "tags": list(iinfo.GetTags()),
4979 d1c2dd75 Iustin Pop
        "should_run": iinfo.status == "up",
4980 d1c2dd75 Iustin Pop
        "vcpus": iinfo.vcpus,
4981 d1c2dd75 Iustin Pop
        "memory": iinfo.memory,
4982 d1c2dd75 Iustin Pop
        "os": iinfo.os,
4983 d1c2dd75 Iustin Pop
        "nodes": [iinfo.primary_node] + list(iinfo.secondary_nodes),
4984 d1c2dd75 Iustin Pop
        "nics": nic_data,
4985 d1c2dd75 Iustin Pop
        "disks": [{"size": dsk.size, "mode": "w"} for dsk in iinfo.disks],
4986 d1c2dd75 Iustin Pop
        "disk_template": iinfo.disk_template,
4987 d1c2dd75 Iustin Pop
        }
4988 768f0a80 Iustin Pop
      instance_data[iinfo.name] = pir
4989 d61df03e Iustin Pop
4990 d1c2dd75 Iustin Pop
    data["instances"] = instance_data
4991 d61df03e Iustin Pop
4992 d1c2dd75 Iustin Pop
    self.in_data = data
4993 d61df03e Iustin Pop
4994 d1c2dd75 Iustin Pop
  def _AddNewInstance(self):
4995 d1c2dd75 Iustin Pop
    """Add new instance data to allocator structure.
4996 d61df03e Iustin Pop

4997 d1c2dd75 Iustin Pop
    This in combination with _AllocatorGetClusterData will create the
4998 d1c2dd75 Iustin Pop
    correct structure needed as input for the allocator.
4999 d61df03e Iustin Pop

5000 d1c2dd75 Iustin Pop
    The checks for the completeness of the opcode must have already been
5001 d1c2dd75 Iustin Pop
    done.
5002 d61df03e Iustin Pop

5003 d1c2dd75 Iustin Pop
    """
5004 d1c2dd75 Iustin Pop
    data = self.in_data
5005 d1c2dd75 Iustin Pop
    if len(self.disks) != 2:
5006 d1c2dd75 Iustin Pop
      raise errors.OpExecError("Only two-disk configurations supported")
5007 d1c2dd75 Iustin Pop
5008 d1c2dd75 Iustin Pop
    disk_space = _ComputeDiskSize(self.disk_template,
5009 d1c2dd75 Iustin Pop
                                  self.disks[0]["size"], self.disks[1]["size"])
5010 d1c2dd75 Iustin Pop
5011 27579978 Iustin Pop
    if self.disk_template in constants.DTS_NET_MIRROR:
5012 27579978 Iustin Pop
      self.required_nodes = 2
5013 27579978 Iustin Pop
    else:
5014 27579978 Iustin Pop
      self.required_nodes = 1
5015 d1c2dd75 Iustin Pop
    request = {
5016 d1c2dd75 Iustin Pop
      "type": "allocate",
5017 d1c2dd75 Iustin Pop
      "name": self.name,
5018 d1c2dd75 Iustin Pop
      "disk_template": self.disk_template,
5019 d1c2dd75 Iustin Pop
      "tags": self.tags,
5020 d1c2dd75 Iustin Pop
      "os": self.os,
5021 d1c2dd75 Iustin Pop
      "vcpus": self.vcpus,
5022 d1c2dd75 Iustin Pop
      "memory": self.mem_size,
5023 d1c2dd75 Iustin Pop
      "disks": self.disks,
5024 d1c2dd75 Iustin Pop
      "disk_space_total": disk_space,
5025 d1c2dd75 Iustin Pop
      "nics": self.nics,
5026 27579978 Iustin Pop
      "required_nodes": self.required_nodes,
5027 d1c2dd75 Iustin Pop
      }
5028 d1c2dd75 Iustin Pop
    data["request"] = request
5029 298fe380 Iustin Pop
5030 d1c2dd75 Iustin Pop
  def _AddRelocateInstance(self):
5031 d1c2dd75 Iustin Pop
    """Add relocate instance data to allocator structure.
5032 298fe380 Iustin Pop

5033 d1c2dd75 Iustin Pop
    This in combination with _IAllocatorGetClusterData will create the
5034 d1c2dd75 Iustin Pop
    correct structure needed as input for the allocator.
5035 d61df03e Iustin Pop

5036 d1c2dd75 Iustin Pop
    The checks for the completeness of the opcode must have already been
5037 d1c2dd75 Iustin Pop
    done.
5038 d61df03e Iustin Pop

5039 d1c2dd75 Iustin Pop
    """
5040 27579978 Iustin Pop
    instance = self.cfg.GetInstanceInfo(self.name)
5041 27579978 Iustin Pop
    if instance is None:
5042 27579978 Iustin Pop
      raise errors.ProgrammerError("Unknown instance '%s' passed to"
5043 27579978 Iustin Pop
                                   " IAllocator" % self.name)
5044 27579978 Iustin Pop
5045 27579978 Iustin Pop
    if instance.disk_template not in constants.DTS_NET_MIRROR:
5046 27579978 Iustin Pop
      raise errors.OpPrereqError("Can't relocate non-mirrored instances")
5047 27579978 Iustin Pop
5048 2a139bb0 Iustin Pop
    if len(instance.secondary_nodes) != 1:
5049 2a139bb0 Iustin Pop
      raise errors.OpPrereqError("Instance has not exactly one secondary node")
5050 2a139bb0 Iustin Pop
5051 27579978 Iustin Pop
    self.required_nodes = 1
5052 27579978 Iustin Pop
5053 27579978 Iustin Pop
    disk_space = _ComputeDiskSize(instance.disk_template,
5054 27579978 Iustin Pop
                                  instance.disks[0].size,
5055 27579978 Iustin Pop
                                  instance.disks[1].size)
5056 27579978 Iustin Pop
5057 d1c2dd75 Iustin Pop
    request = {
5058 2a139bb0 Iustin Pop
      "type": "relocate",
5059 d1c2dd75 Iustin Pop
      "name": self.name,
5060 27579978 Iustin Pop
      "disk_space_total": disk_space,
5061 27579978 Iustin Pop
      "required_nodes": self.required_nodes,
5062 29859cb7 Iustin Pop
      "relocate_from": self.relocate_from,
5063 d1c2dd75 Iustin Pop
      }
5064 27579978 Iustin Pop
    self.in_data["request"] = request
5065 d61df03e Iustin Pop
5066 d1c2dd75 Iustin Pop
  def _BuildInputData(self):
5067 d1c2dd75 Iustin Pop
    """Build input data structures.
5068 d61df03e Iustin Pop

5069 d1c2dd75 Iustin Pop
    """
5070 d1c2dd75 Iustin Pop
    self._ComputeClusterData()
5071 d61df03e Iustin Pop
5072 d1c2dd75 Iustin Pop
    if self.mode == constants.IALLOCATOR_MODE_ALLOC:
5073 d1c2dd75 Iustin Pop
      self._AddNewInstance()
5074 d1c2dd75 Iustin Pop
    else:
5075 d1c2dd75 Iustin Pop
      self._AddRelocateInstance()
5076 d61df03e Iustin Pop
5077 d1c2dd75 Iustin Pop
    self.in_text = serializer.Dump(self.in_data)
5078 d61df03e Iustin Pop
5079 8d528b7c Iustin Pop
  def Run(self, name, validate=True, call_fn=rpc.call_iallocator_runner):
5080 d1c2dd75 Iustin Pop
    """Run an instance allocator and return the results.
5081 298fe380 Iustin Pop

5082 d1c2dd75 Iustin Pop
    """
5083 d1c2dd75 Iustin Pop
    data = self.in_text
5084 298fe380 Iustin Pop
5085 8d528b7c Iustin Pop
    result = call_fn(self.sstore.GetMasterNode(), name, self.in_text)
5086 298fe380 Iustin Pop
5087 8d528b7c Iustin Pop
    if not isinstance(result, tuple) or len(result) != 4:
5088 8d528b7c Iustin Pop
      raise errors.OpExecError("Invalid result from master iallocator runner")
5089 8d528b7c Iustin Pop
5090 8d528b7c Iustin Pop
    rcode, stdout, stderr, fail = result
5091 8d528b7c Iustin Pop
5092 8d528b7c Iustin Pop
    if rcode == constants.IARUN_NOTFOUND:
5093 8d528b7c Iustin Pop
      raise errors.OpExecError("Can't find allocator '%s'" % name)
5094 8d528b7c Iustin Pop
    elif rcode == constants.IARUN_FAILURE:
5095 38206f3c Iustin Pop
      raise errors.OpExecError("Instance allocator call failed: %s,"
5096 38206f3c Iustin Pop
                               " output: %s" % (fail, stdout+stderr))
5097 8d528b7c Iustin Pop
    self.out_text = stdout
5098 d1c2dd75 Iustin Pop
    if validate:
5099 d1c2dd75 Iustin Pop
      self._ValidateResult()
5100 298fe380 Iustin Pop
5101 d1c2dd75 Iustin Pop
  def _ValidateResult(self):
5102 d1c2dd75 Iustin Pop
    """Process the allocator results.
5103 538475ca Iustin Pop

5104 d1c2dd75 Iustin Pop
    This will process and if successful save the result in
5105 d1c2dd75 Iustin Pop
    self.out_data and the other parameters.
5106 538475ca Iustin Pop

5107 d1c2dd75 Iustin Pop
    """
5108 d1c2dd75 Iustin Pop
    try:
5109 d1c2dd75 Iustin Pop
      rdict = serializer.Load(self.out_text)
5110 d1c2dd75 Iustin Pop
    except Exception, err:
5111 d1c2dd75 Iustin Pop
      raise errors.OpExecError("Can't parse iallocator results: %s" % str(err))
5112 d1c2dd75 Iustin Pop
5113 d1c2dd75 Iustin Pop
    if not isinstance(rdict, dict):
5114 d1c2dd75 Iustin Pop
      raise errors.OpExecError("Can't parse iallocator results: not a dict")
5115 538475ca Iustin Pop
5116 d1c2dd75 Iustin Pop
    for key in "success", "info", "nodes":
5117 d1c2dd75 Iustin Pop
      if key not in rdict:
5118 d1c2dd75 Iustin Pop
        raise errors.OpExecError("Can't parse iallocator results:"
5119 d1c2dd75 Iustin Pop
                                 " missing key '%s'" % key)
5120 d1c2dd75 Iustin Pop
      setattr(self, key, rdict[key])
5121 538475ca Iustin Pop
5122 d1c2dd75 Iustin Pop
    if not isinstance(rdict["nodes"], list):
5123 d1c2dd75 Iustin Pop
      raise errors.OpExecError("Can't parse iallocator results: 'nodes' key"
5124 d1c2dd75 Iustin Pop
                               " is not a list")
5125 d1c2dd75 Iustin Pop
    self.out_data = rdict
5126 538475ca Iustin Pop
5127 538475ca Iustin Pop
5128 d61df03e Iustin Pop
class LUTestAllocator(NoHooksLU):
  """Run allocator tests.

  This LU builds an IAllocator request from the opcode parameters and,
  depending on the requested direction, either returns the generated
  request text or actually runs the named allocator script.

  """
  _OP_REQP = ["direction", "mode", "name"]

  def _CheckAllocInput(self):
    """Validate the opcode fields for an allocation-mode test."""
    for attr in ["name", "mem_size", "disks", "disk_template",
                 "os", "tags", "nics", "vcpus"]:
      if not hasattr(self.op, attr):
        raise errors.OpPrereqError("Missing attribute '%s' on opcode input" %
                                   attr)
    # the test instance must not already exist
    iname = self.cfg.ExpandInstanceName(self.op.name)
    if iname is not None:
      raise errors.OpPrereqError("Instance '%s' already in the cluster" %
                                 iname)
    if not isinstance(self.op.nics, list):
      raise errors.OpPrereqError("Invalid parameter 'nics'")
    for nic in self.op.nics:
      # each NIC is a dict carrying at least mac, ip and bridge
      nic_ok = (isinstance(nic, dict) and
                "mac" in nic and
                "ip" in nic and
                "bridge" in nic)
      if not nic_ok:
        raise errors.OpPrereqError("Invalid contents of the"
                                   " 'nics' parameter")
    if not isinstance(self.op.disks, list):
      raise errors.OpPrereqError("Invalid parameter 'disks'")
    if len(self.op.disks) != 2:
      raise errors.OpPrereqError("Only two-disk configurations supported")
    for disk in self.op.disks:
      # each disk is a dict with an integer size and an r/w access mode
      disk_ok = (isinstance(disk, dict) and
                 isinstance(disk.get("size"), int) and
                 disk.get("mode") in ['r', 'w'])
      if not disk_ok:
        raise errors.OpPrereqError("Invalid contents of the"
                                   " 'disks' parameter")

  def _CheckRelocInput(self):
    """Validate the opcode fields for a relocation-mode test."""
    if not hasattr(self.op, "name"):
      raise errors.OpPrereqError("Missing attribute 'name' on opcode input")
    fname = self.cfg.ExpandInstanceName(self.op.name)
    if fname is None:
      raise errors.OpPrereqError("Instance '%s' not found for relocation" %
                                 self.op.name)
    # remember the expanded name and the nodes to relocate from
    self.op.name = fname
    self.relocate_from = self.cfg.GetInstanceInfo(fname).secondary_nodes

  def CheckPrereq(self):
    """Check prerequisites.

    This checks the opcode parameters depending on the direction and
    mode of the test.

    """
    if self.op.mode == constants.IALLOCATOR_MODE_ALLOC:
      self._CheckAllocInput()
    elif self.op.mode == constants.IALLOCATOR_MODE_RELOC:
      self._CheckRelocInput()
    else:
      raise errors.OpPrereqError("Invalid test allocator mode '%s'" %
                                 self.op.mode)

    if self.op.direction == constants.IALLOCATOR_DIR_OUT:
      # running the allocator requires an allocator name
      if getattr(self.op, "allocator", None) is None:
        raise errors.OpPrereqError("Missing allocator name")
    elif self.op.direction != constants.IALLOCATOR_DIR_IN:
      raise errors.OpPrereqError("Wrong allocator test '%s'" %
                                 self.op.direction)

  def Exec(self, feedback_fn):
    """Run the allocator test.

    """
    if self.op.mode == constants.IALLOCATOR_MODE_ALLOC:
      ial = IAllocator(self.cfg, self.sstore,
                       mode=self.op.mode,
                       name=self.op.name,
                       mem_size=self.op.mem_size,
                       disks=self.op.disks,
                       disk_template=self.op.disk_template,
                       os=self.op.os,
                       tags=self.op.tags,
                       nics=self.op.nics,
                       vcpus=self.op.vcpus)
    else:
      ial = IAllocator(self.cfg, self.sstore,
                       mode=self.op.mode,
                       name=self.op.name,
                       relocate_from=list(self.relocate_from))

    if self.op.direction == constants.IALLOCATOR_DIR_IN:
      # "in" direction: just return the generated request text
      return ial.in_text
    ial.Run(self.op.allocator, validate=False)
    return ial.out_text