Statistics
| Branch: | Tag: | Revision:

root / lib / cmdlib.py @ 204f2086

History | View | Annotate | Download (179.2 kB)

1 2f31098c Iustin Pop
#
2 a8083063 Iustin Pop
#
3 a8083063 Iustin Pop
4 e7c6e02b Michael Hanselmann
# Copyright (C) 2006, 2007, 2008 Google Inc.
5 a8083063 Iustin Pop
#
6 a8083063 Iustin Pop
# This program is free software; you can redistribute it and/or modify
7 a8083063 Iustin Pop
# it under the terms of the GNU General Public License as published by
8 a8083063 Iustin Pop
# the Free Software Foundation; either version 2 of the License, or
9 a8083063 Iustin Pop
# (at your option) any later version.
10 a8083063 Iustin Pop
#
11 a8083063 Iustin Pop
# This program is distributed in the hope that it will be useful, but
12 a8083063 Iustin Pop
# WITHOUT ANY WARRANTY; without even the implied warranty of
13 a8083063 Iustin Pop
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
14 a8083063 Iustin Pop
# General Public License for more details.
15 a8083063 Iustin Pop
#
16 a8083063 Iustin Pop
# You should have received a copy of the GNU General Public License
17 a8083063 Iustin Pop
# along with this program; if not, write to the Free Software
18 a8083063 Iustin Pop
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
19 a8083063 Iustin Pop
# 02110-1301, USA.
20 a8083063 Iustin Pop
21 a8083063 Iustin Pop
22 880478f8 Iustin Pop
"""Module implementing the master-side code."""
23 a8083063 Iustin Pop
24 a8083063 Iustin Pop
# pylint: disable-msg=W0613,W0201
25 a8083063 Iustin Pop
26 a8083063 Iustin Pop
import os
27 a8083063 Iustin Pop
import os.path
28 a8083063 Iustin Pop
import sha
29 a8083063 Iustin Pop
import time
30 a8083063 Iustin Pop
import tempfile
31 a8083063 Iustin Pop
import re
32 a8083063 Iustin Pop
import platform
33 a8083063 Iustin Pop
34 a8083063 Iustin Pop
from ganeti import rpc
35 a8083063 Iustin Pop
from ganeti import ssh
36 a8083063 Iustin Pop
from ganeti import logger
37 a8083063 Iustin Pop
from ganeti import utils
38 a8083063 Iustin Pop
from ganeti import errors
39 a8083063 Iustin Pop
from ganeti import hypervisor
40 6048c986 Guido Trotter
from ganeti import locking
41 a8083063 Iustin Pop
from ganeti import constants
42 a8083063 Iustin Pop
from ganeti import objects
43 a8083063 Iustin Pop
from ganeti import opcodes
44 8d14b30d Iustin Pop
from ganeti import serializer
45 d61df03e Iustin Pop
46 d61df03e Iustin Pop
47 a8083063 Iustin Pop
class LogicalUnit(object):
  """Logical Unit base class.

  Subclasses must follow these rules:
    - implement ExpandNames
    - implement CheckPrereq
    - implement Exec
    - implement BuildHooksEnv
    - redefine HPATH and HTYPE
    - optionally redefine their run requirements:
        REQ_MASTER: the LU needs to run on the master node
        REQ_WSSTORE: the LU needs a writable SimpleStore
        REQ_BGL: the LU needs to hold the Big Ganeti Lock exclusively

  Note that all commands require root permissions.

  """
  # Hooks directory path and hooks type; subclasses that run hooks
  # redefine these (a None HPATH means BuildHooksEnv is never called).
  HPATH = None
  HTYPE = None
  # Names of opcode attributes that must be set; checked in __init__.
  _OP_REQP = []
  REQ_MASTER = True
  REQ_WSSTORE = False
  REQ_BGL = True

  def __init__(self, processor, op, context, sstore):
    """Constructor for LogicalUnit.

    This needs to be overriden in derived classes in order to check op
    validity.

    Verifies that every parameter listed in _OP_REQP is present on the
    opcode, that the cluster is initialized, and (for REQ_MASTER LUs)
    that we are running on the master node; raises
    errors.OpPrereqError otherwise.

    """
    self.proc = processor
    self.op = op
    self.cfg = context.cfg
    self.sstore = sstore
    self.context = context
    # Locking state; see ExpandNames/DeclareLocks for the protocol.
    self.needed_locks = None
    self.acquired_locks = {}
    # By default locks are acquired exclusively (0) at every level.
    self.share_locks = dict(((i, 0) for i in locking.LEVELS))
    # Used to force good behavior when calling helper functions
    self.recalculate_locks = {}
    # Lazily-built SshRunner, exposed through the 'ssh' property below.
    self.__ssh = None

    for attr_name in self._OP_REQP:
      attr_val = getattr(op, attr_name, None)
      if attr_val is None:
        raise errors.OpPrereqError("Required parameter '%s' missing" %
                                   attr_name)

    if not self.cfg.IsCluster():
      raise errors.OpPrereqError("Cluster not initialized yet,"
                                 " use 'gnt-cluster init' first.")
    if self.REQ_MASTER:
      master = sstore.GetMasterNode()
      if master != utils.HostInfo().name:
        raise errors.OpPrereqError("Commands must be run on the master"
                                   " node %s" % master)

  def __GetSSH(self):
    """Returns the SshRunner object

    The runner is created on first access and cached for later calls.

    """
    if not self.__ssh:
      self.__ssh = ssh.SshRunner(self.sstore)
    return self.__ssh

  ssh = property(fget=__GetSSH)

  def ExpandNames(self):
    """Expand names for this LU.

    This method is called before starting to execute the opcode, and it should
    update all the parameters of the opcode to their canonical form (e.g. a
    short node name must be fully expanded after this method has successfully
    completed). This way locking, hooks, logging, ecc. can work correctly.

    LUs which implement this method must also populate the self.needed_locks
    member, as a dict with lock levels as keys, and a list of needed lock names
    as values. Rules:
      - Use an empty dict if you don't need any lock
      - If you don't need any lock at a particular level omit that level
      - Don't put anything for the BGL level
      - If you want all locks at a level use None as a value
        (this reflects what LockSet does, and will be replaced before
        CheckPrereq with the full list of nodes that have been locked)

    If you need to share locks (rather than acquire them exclusively) at one
    level you can modify self.share_locks, setting a true value (usually 1) for
    that level. By default locks are not shared.

    Examples:
    # Acquire all nodes and one instance
    self.needed_locks = {
      locking.LEVEL_NODE: None,
      locking.LEVEL_INSTANCES: ['instance1.example.tld'],
    }
    # Acquire just two nodes
    self.needed_locks = {
      locking.LEVEL_NODE: ['node1.example.tld', 'node2.example.tld'],
    }
    # Acquire no locks
    self.needed_locks = {} # No, you can't leave it to the default value None

    """
    # The implementation of this method is mandatory only if the new LU is
    # concurrent, so that old LUs don't need to be changed all at the same
    # time.
    if self.REQ_BGL:
      self.needed_locks = {} # Exclusive LUs don't need locks.
    else:
      raise NotImplementedError

  def DeclareLocks(self, level):
    """Declare LU locking needs for a level

    While most LUs can just declare their locking needs at ExpandNames time,
    sometimes there's the need to calculate some locks after having acquired
    the ones before. This function is called just before acquiring locks at a
    particular level, but after acquiring the ones at lower levels, and permits
    such calculations. It can be used to modify self.needed_locks, and by
    default it does nothing.

    This function is only called if you have something already set in
    self.needed_locks for the level.

    @param level: Locking level which is going to be locked
    @type level: member of ganeti.locking.LEVELS

    """

  def CheckPrereq(self):
    """Check prerequisites for this LU.

    This method should check that the prerequisites for the execution
    of this LU are fulfilled. It can do internode communication, but
    it should be idempotent - no cluster or system changes are
    allowed.

    The method should raise errors.OpPrereqError in case something is
    not fulfilled. Its return value is ignored.

    This method should also update all the parameters of the opcode to
    their canonical form if it hasn't been done by ExpandNames before.

    """
    raise NotImplementedError

  def Exec(self, feedback_fn):
    """Execute the LU.

    This method should implement the actual work. It should raise
    errors.OpExecError for failures that are somewhat dealt with in
    code, or expected.

    """
    raise NotImplementedError

  def BuildHooksEnv(self):
    """Build hooks environment for this LU.

    This method should return a three-node tuple consisting of: a dict
    containing the environment that will be used for running the
    specific hook for this LU, a list of node names on which the hook
    should run before the execution, and a list of node names on which
    the hook should run after the execution.

    The keys of the dict must not have 'GANETI_' prefixed as this will
    be handled in the hooks runner. Also note additional keys will be
    added by the hooks runner. If the LU doesn't define any
    environment, an empty dict (and not None) should be returned.

    No nodes should be returned as an empty list (and not None).

    Note that if the HPATH for a LU class is None, this function will
    not be called.

    """
    raise NotImplementedError

  def HooksCallBack(self, phase, hook_results, feedback_fn, lu_result):
    """Notify the LU about the results of its hooks.

    This method is called every time a hooks phase is executed, and notifies
    the Logical Unit about the hooks' result. The LU can then use it to alter
    its result based on the hooks.  By default the method does nothing and the
    previous result is passed back unchanged but any LU can define it if it
    wants to use the local cluster hook-scripts somehow.

    Args:
      phase: the hooks phase that has just been run
      hook_results: the results of the multi-node hooks rpc call
      feedback_fn: function to send feedback back to the caller
      lu_result: the previous result this LU had, or None in the PRE phase.

    """
    return lu_result

  def _ExpandAndLockInstance(self):
    """Helper function to expand and lock an instance.

    Many LUs that work on an instance take its name in self.op.instance_name
    and need to expand it and then declare the expanded name for locking. This
    function does it, and then updates self.op.instance_name to the expanded
    name. It also initializes needed_locks as a dict, if this hasn't been done
    before.

    Raises errors.OpPrereqError if the instance name cannot be expanded.

    """
    if self.needed_locks is None:
      self.needed_locks = {}
    else:
      assert locking.LEVEL_INSTANCE not in self.needed_locks, \
        "_ExpandAndLockInstance called with instance-level locks set"
    expanded_name = self.cfg.ExpandInstanceName(self.op.instance_name)
    if expanded_name is None:
      raise errors.OpPrereqError("Instance '%s' not known" %
                                  self.op.instance_name)
    self.needed_locks[locking.LEVEL_INSTANCE] = expanded_name
    self.op.instance_name = expanded_name

  def _LockInstancesNodes(self):
    """Helper function to declare instances' nodes for locking.

    This function should be called after locking one or more instances to lock
    their nodes. Its effect is populating self.needed_locks[locking.LEVEL_NODE]
    with all primary or secondary nodes for instances already locked and
    present in self.needed_locks[locking.LEVEL_INSTANCE].

    It should be called from DeclareLocks, and for safety only works if
    self.recalculate_locks[locking.LEVEL_NODE] is set.

    In the future it may grow parameters to just lock some instance's nodes, or
    to just lock primaries or secondary nodes, if needed.

    If should be called in DeclareLocks in a way similar to:

    if level == locking.LEVEL_NODE:
      self._LockInstancesNodes()

    """
    assert locking.LEVEL_NODE in self.recalculate_locks, \
      "_LockInstancesNodes helper function called with no nodes to recalculate"

    # TODO: check if we're really been called with the instance locks held

    # For now we'll replace self.needed_locks[locking.LEVEL_NODE], but in the
    # future we might want to have different behaviors depending on the value
    # of self.recalculate_locks[locking.LEVEL_NODE]
    wanted_nodes = []
    for instance_name in self.acquired_locks[locking.LEVEL_INSTANCE]:
      instance = self.context.cfg.GetInstanceInfo(instance_name)
      wanted_nodes.append(instance.primary_node)
      wanted_nodes.extend(instance.secondary_nodes)
    self.needed_locks[locking.LEVEL_NODE] = wanted_nodes

    # Consume the recalculation request so a stale flag cannot trigger a
    # second, unintended recalculation later.
    del self.recalculate_locks[locking.LEVEL_NODE]
302 c4a2fee1 Guido Trotter
303 a8083063 Iustin Pop
304 a8083063 Iustin Pop
class NoHooksLU(LogicalUnit):
  """Base class for Logical Units that run no hooks.

  Deriving from this class instead of LogicalUnit saves hook-less LUs
  from having to reset HPATH/HTYPE themselves, reducing duplication.

  """
  # No hooks path and no hooks type: the hooks machinery is skipped
  # entirely for subclasses of this LU.
  HPATH = None
  HTYPE = None
313 a8083063 Iustin Pop
314 a8083063 Iustin Pop
315 dcb93971 Michael Hanselmann
def _GetWantedNodes(lu, nodes):
  """Returns list of checked and expanded node names.

  Args:
    lu: the LogicalUnit on whose behalf we operate (its cfg is used
        for name expansion)
    nodes: List of nodes (strings) or None for all

  Raises errors.OpPrereqError if the argument is not a list or if any
  name cannot be expanded to a known node.

  """
  if not isinstance(nodes, list):
    raise errors.OpPrereqError("Invalid argument type 'nodes'")

  if not nodes:
    # Empty selection means every node in the cluster.
    return utils.NiceSort(lu.cfg.GetNodeList())

  wanted = []
  for short_name in nodes:
    full_name = lu.cfg.ExpandNodeName(short_name)
    if full_name is None:
      raise errors.OpPrereqError("No such node name '%s'" % short_name)
    wanted.append(full_name)
  return utils.NiceSort(wanted)
337 3312b702 Iustin Pop
338 3312b702 Iustin Pop
339 3312b702 Iustin Pop
def _GetWantedInstances(lu, instances):
  """Returns list of checked and expanded instance names.

  Args:
    lu: the LogicalUnit on whose behalf we operate (its cfg is used
        for name expansion)
    instances: List of instances (strings) or None for all

  Raises errors.OpPrereqError if the argument is not a list or if any
  name cannot be expanded to a known instance.

  """
  if not isinstance(instances, list):
    raise errors.OpPrereqError("Invalid argument type 'instances'")

  if not instances:
    # Empty selection means every instance in the cluster.
    return utils.NiceSort(lu.cfg.GetInstanceList())

  wanted = []
  for short_name in instances:
    full_name = lu.cfg.ExpandInstanceName(short_name)
    if full_name is None:
      raise errors.OpPrereqError("No such instance name '%s'" % short_name)
    wanted.append(full_name)
  return utils.NiceSort(wanted)
361 dcb93971 Michael Hanselmann
362 dcb93971 Michael Hanselmann
363 dcb93971 Michael Hanselmann
def _CheckOutputFields(static, dynamic, selected):
364 83120a01 Michael Hanselmann
  """Checks whether all selected fields are valid.
365 83120a01 Michael Hanselmann

366 83120a01 Michael Hanselmann
  Args:
367 83120a01 Michael Hanselmann
    static: Static fields
368 83120a01 Michael Hanselmann
    dynamic: Dynamic fields
369 83120a01 Michael Hanselmann

370 83120a01 Michael Hanselmann
  """
371 83120a01 Michael Hanselmann
  static_fields = frozenset(static)
372 83120a01 Michael Hanselmann
  dynamic_fields = frozenset(dynamic)
373 dcb93971 Michael Hanselmann
374 83120a01 Michael Hanselmann
  all_fields = static_fields | dynamic_fields
375 dcb93971 Michael Hanselmann
376 83120a01 Michael Hanselmann
  if not all_fields.issuperset(selected):
377 3ecf6786 Iustin Pop
    raise errors.OpPrereqError("Unknown output fields selected: %s"
378 3ecf6786 Iustin Pop
                               % ",".join(frozenset(selected).
379 3ecf6786 Iustin Pop
                                          difference(all_fields)))
380 dcb93971 Michael Hanselmann
381 dcb93971 Michael Hanselmann
382 ecb215b5 Michael Hanselmann
def _BuildInstanceHookEnv(name, primary_node, secondary_nodes, os_type, status,
383 396e1b78 Michael Hanselmann
                          memory, vcpus, nics):
384 ecb215b5 Michael Hanselmann
  """Builds instance related env variables for hooks from single variables.
385 ecb215b5 Michael Hanselmann

386 ecb215b5 Michael Hanselmann
  Args:
387 ecb215b5 Michael Hanselmann
    secondary_nodes: List of secondary nodes as strings
388 396e1b78 Michael Hanselmann
  """
389 396e1b78 Michael Hanselmann
  env = {
390 0e137c28 Iustin Pop
    "OP_TARGET": name,
391 396e1b78 Michael Hanselmann
    "INSTANCE_NAME": name,
392 396e1b78 Michael Hanselmann
    "INSTANCE_PRIMARY": primary_node,
393 396e1b78 Michael Hanselmann
    "INSTANCE_SECONDARIES": " ".join(secondary_nodes),
394 ecb215b5 Michael Hanselmann
    "INSTANCE_OS_TYPE": os_type,
395 396e1b78 Michael Hanselmann
    "INSTANCE_STATUS": status,
396 396e1b78 Michael Hanselmann
    "INSTANCE_MEMORY": memory,
397 396e1b78 Michael Hanselmann
    "INSTANCE_VCPUS": vcpus,
398 396e1b78 Michael Hanselmann
  }
399 396e1b78 Michael Hanselmann
400 396e1b78 Michael Hanselmann
  if nics:
401 396e1b78 Michael Hanselmann
    nic_count = len(nics)
402 53e4e875 Guido Trotter
    for idx, (ip, bridge, mac) in enumerate(nics):
403 396e1b78 Michael Hanselmann
      if ip is None:
404 396e1b78 Michael Hanselmann
        ip = ""
405 396e1b78 Michael Hanselmann
      env["INSTANCE_NIC%d_IP" % idx] = ip
406 396e1b78 Michael Hanselmann
      env["INSTANCE_NIC%d_BRIDGE" % idx] = bridge
407 53e4e875 Guido Trotter
      env["INSTANCE_NIC%d_HWADDR" % idx] = mac
408 396e1b78 Michael Hanselmann
  else:
409 396e1b78 Michael Hanselmann
    nic_count = 0
410 396e1b78 Michael Hanselmann
411 396e1b78 Michael Hanselmann
  env["INSTANCE_NIC_COUNT"] = nic_count
412 396e1b78 Michael Hanselmann
413 396e1b78 Michael Hanselmann
  return env
414 396e1b78 Michael Hanselmann
415 396e1b78 Michael Hanselmann
416 396e1b78 Michael Hanselmann
def _BuildInstanceHookEnvByObject(instance, override=None):
  """Builds instance related env variables for hooks from an object.

  Args:
    instance: objects.Instance object of instance
    override: dict of values to override (keys must match the keyword
      arguments of _BuildInstanceHookEnv)

  Returns the environment dict built by _BuildInstanceHookEnv.

  """
  args = {
    'name': instance.name,
    'primary_node': instance.primary_node,
    'secondary_nodes': instance.secondary_nodes,
    'os_type': instance.os,
    # FIX: this used to pass instance.os here as well, which made
    # INSTANCE_STATUS a duplicate of INSTANCE_OS_TYPE in all hooks
    'status': instance.status,
    'memory': instance.memory,
    'vcpus': instance.vcpus,
    'nics': [(nic.ip, nic.bridge, nic.mac) for nic in instance.nics],
  }
  if override:
    args.update(override)
  return _BuildInstanceHookEnv(**args)
436 396e1b78 Michael Hanselmann
437 396e1b78 Michael Hanselmann
438 bf6929a2 Alexander Schreiber
def _CheckInstanceBridgesExist(instance):
  """Check that all bridges needed by an instance exist.

  Queries the instance's primary node (via rpc) for the bridges used
  by its NICs and raises errors.OpPrereqError if any is missing.

  """
  # collect the bridge of every NIC and verify them in one rpc call
  bridges = [nic.bridge for nic in instance.nics]
  if not rpc.call_bridges_exist(instance.primary_node, bridges):
    raise errors.OpPrereqError("one or more target bridges %s does not"
                               " exist on destination node '%s'" %
                               (bridges, instance.primary_node))
448 bf6929a2 Alexander Schreiber
449 bf6929a2 Alexander Schreiber
450 a8083063 Iustin Pop
class LUDestroyCluster(NoHooksLU):
  """Logical unit for destroying the cluster.

  """
  _OP_REQP = []

  def CheckPrereq(self):
    """Check prerequisites.

    This checks whether the cluster is empty.

    Any errors are signalled by raising errors.OpPrereqError.

    """
    master = self.sstore.GetMasterNode()

    # The master must be the only remaining node...
    nodes = self.cfg.GetNodeList()
    if len(nodes) != 1 or nodes[0] != master:
      raise errors.OpPrereqError("There are still %d node(s) in"
                                 " this cluster." % (len(nodes) - 1))
    # ...and no instances may be left.
    instances = self.cfg.GetInstanceList()
    if instances:
      raise errors.OpPrereqError("There are still %d instance(s) in"
                                 " this cluster." % len(instances))

  def Exec(self, feedback_fn):
    """Destroys the cluster.

    Stops the master role, backs up the cluster's ssh keys and returns
    the name of the (former) master node to the caller.

    """
    master = self.sstore.GetMasterNode()
    if not rpc.call_node_stop_master(master, False):
      raise errors.OpExecError("Could not disable the master role")
    priv_key, pub_key, _ = ssh.GetUserFiles(constants.GANETI_RUNAS)
    utils.CreateBackup(priv_key)
    utils.CreateBackup(pub_key)
    return master
486 a8083063 Iustin Pop
487 a8083063 Iustin Pop
488 d8fff41c Guido Trotter
class LUVerifyCluster(LogicalUnit):
  """Verifies the cluster status.

  """
  HPATH = "cluster-verify"
  HTYPE = constants.HTYPE_CLUSTER
  # required opcode parameters
  _OP_REQP = ["skip_checks"]
495 a8083063 Iustin Pop
496 a8083063 Iustin Pop
  def _VerifyNode(self, node, file_list, local_cksum, vglist, node_result,
                  remote_version, feedback_fn):
    """Run multiple tests against a node.

    Test list:
      - compares ganeti version
      - checks vg existance and size > 20G
      - checks config file checksum
      - checks ssh to other nodes

    Args:
      node: name of the node to check
      file_list: required list of files
      local_cksum: dictionary of local files and their checksums
      vglist: the volume group data returned by the node
      node_result: the results of the 'node verify' RPC for this node
      remote_version: the protocol version reported by the node
      feedback_fn: function used to report each problem found

    Returns:
      True if any check failed, False otherwise.

    """
    # compares ganeti version
    local_version = constants.PROTOCOL_VERSION
    if not remote_version:
      # no version data means we could not talk to the node at all
      feedback_fn("  - ERROR: connection to %s failed" % (node))
      return True

    if local_version != remote_version:
      feedback_fn("  - ERROR: sw version mismatch: master %s, node(%s) %s" %
                      (local_version, node, remote_version))
      return True

    # checks vg existance and size > 20G

    bad = False
    if not vglist:
      feedback_fn("  - ERROR: unable to check volume groups on node %s." %
                      (node,))
      bad = True
    else:
      vgstatus = utils.CheckVolumeGroupSize(vglist, self.cfg.GetVGName(),
                                            constants.MIN_VG_SIZE)
      if vgstatus:
        # a true value from the check is reported as the error message
        feedback_fn("  - ERROR: %s on node %s" % (vgstatus, node))
        bad = True

    # checks config file checksum
    # checks ssh to any

    if 'filelist' not in node_result:
      bad = True
      feedback_fn("  - ERROR: node hasn't returned file checksum data")
    else:
      remote_cksum = node_result['filelist']
      for file_name in file_list:
        if file_name not in remote_cksum:
          bad = True
          feedback_fn("  - ERROR: file '%s' missing" % file_name)
        elif remote_cksum[file_name] != local_cksum[file_name]:
          bad = True
          feedback_fn("  - ERROR: file '%s' has wrong checksum" % file_name)

    if 'nodelist' not in node_result:
      bad = True
      feedback_fn("  - ERROR: node hasn't returned node ssh connectivity data")
    else:
      if node_result['nodelist']:
        bad = True
        # NOTE(review): this loop variable shadows the 'node' parameter
        for node in node_result['nodelist']:
          feedback_fn("  - ERROR: ssh communication with node '%s': %s" %
                          (node, node_result['nodelist'][node]))
    if 'node-net-test' not in node_result:
      bad = True
      feedback_fn("  - ERROR: node hasn't returned node tcp connectivity data")
    else:
      if node_result['node-net-test']:
        bad = True
        nlist = utils.NiceSort(node_result['node-net-test'].keys())
        # NOTE(review): 'node' is shadowed here as well
        for node in nlist:
          feedback_fn("  - ERROR: tcp communication with node '%s': %s" %
                          (node, node_result['node-net-test'][node]))

    hyp_result = node_result.get('hypervisor', None)
    if hyp_result is not None:
      # a non-None value is the hypervisor's own failure message
      feedback_fn("  - ERROR: hypervisor verify failure: '%s'" % hyp_result)
    return bad
577 a8083063 Iustin Pop
578 c5705f58 Guido Trotter
  def _VerifyInstance(self, instance, instanceconfig, node_vol_is,
579 c5705f58 Guido Trotter
                      node_instance, feedback_fn):
580 a8083063 Iustin Pop
    """Verify an instance.
581 a8083063 Iustin Pop

582 a8083063 Iustin Pop
    This function checks to see if the required block devices are
583 a8083063 Iustin Pop
    available on the instance's node.
584 a8083063 Iustin Pop

585 a8083063 Iustin Pop
    """
586 a8083063 Iustin Pop
    bad = False
587 a8083063 Iustin Pop
588 a8083063 Iustin Pop
    node_current = instanceconfig.primary_node
589 a8083063 Iustin Pop
590 a8083063 Iustin Pop
    node_vol_should = {}
591 a8083063 Iustin Pop
    instanceconfig.MapLVsByNode(node_vol_should)
592 a8083063 Iustin Pop
593 a8083063 Iustin Pop
    for node in node_vol_should:
594 a8083063 Iustin Pop
      for volume in node_vol_should[node]:
595 a8083063 Iustin Pop
        if node not in node_vol_is or volume not in node_vol_is[node]:
596 a8083063 Iustin Pop
          feedback_fn("  - ERROR: volume %s missing on node %s" %
597 a8083063 Iustin Pop
                          (volume, node))
598 a8083063 Iustin Pop
          bad = True
599 a8083063 Iustin Pop
600 a8083063 Iustin Pop
    if not instanceconfig.status == 'down':
601 a872dae6 Guido Trotter
      if (node_current not in node_instance or
602 a872dae6 Guido Trotter
          not instance in node_instance[node_current]):
603 a8083063 Iustin Pop
        feedback_fn("  - ERROR: instance %s not running on node %s" %
604 a8083063 Iustin Pop
                        (instance, node_current))
605 a8083063 Iustin Pop
        bad = True
606 a8083063 Iustin Pop
607 a8083063 Iustin Pop
    for node in node_instance:
608 a8083063 Iustin Pop
      if (not node == node_current):
609 a8083063 Iustin Pop
        if instance in node_instance[node]:
610 a8083063 Iustin Pop
          feedback_fn("  - ERROR: instance %s should not run on node %s" %
611 a8083063 Iustin Pop
                          (instance, node))
612 a8083063 Iustin Pop
          bad = True
613 a8083063 Iustin Pop
614 6a438c98 Michael Hanselmann
    return bad
615 a8083063 Iustin Pop
616 a8083063 Iustin Pop
  def _VerifyOrphanVolumes(self, node_vol_should, node_vol_is, feedback_fn):
617 a8083063 Iustin Pop
    """Verify if there are any unknown volumes in the cluster.
618 a8083063 Iustin Pop

619 a8083063 Iustin Pop
    The .os, .swap and backup volumes are ignored. All other volumes are
620 a8083063 Iustin Pop
    reported as unknown.
621 a8083063 Iustin Pop

622 a8083063 Iustin Pop
    """
623 a8083063 Iustin Pop
    bad = False
624 a8083063 Iustin Pop
625 a8083063 Iustin Pop
    for node in node_vol_is:
626 a8083063 Iustin Pop
      for volume in node_vol_is[node]:
627 a8083063 Iustin Pop
        if node not in node_vol_should or volume not in node_vol_should[node]:
628 a8083063 Iustin Pop
          feedback_fn("  - ERROR: volume %s on node %s should not exist" %
629 a8083063 Iustin Pop
                      (volume, node))
630 a8083063 Iustin Pop
          bad = True
631 a8083063 Iustin Pop
    return bad
632 a8083063 Iustin Pop
633 a8083063 Iustin Pop
  def _VerifyOrphanInstances(self, instancelist, node_instance, feedback_fn):
634 a8083063 Iustin Pop
    """Verify the list of running instances.
635 a8083063 Iustin Pop

636 a8083063 Iustin Pop
    This checks what instances are running but unknown to the cluster.
637 a8083063 Iustin Pop

638 a8083063 Iustin Pop
    """
639 a8083063 Iustin Pop
    bad = False
640 a8083063 Iustin Pop
    for node in node_instance:
641 a8083063 Iustin Pop
      for runninginstance in node_instance[node]:
642 a8083063 Iustin Pop
        if runninginstance not in instancelist:
643 a8083063 Iustin Pop
          feedback_fn("  - ERROR: instance %s on node %s should not exist" %
644 a8083063 Iustin Pop
                          (runninginstance, node))
645 a8083063 Iustin Pop
          bad = True
646 a8083063 Iustin Pop
    return bad
647 a8083063 Iustin Pop
648 2b3b6ddd Guido Trotter
  def _VerifyNPlusOneMemory(self, node_info, instance_cfg, feedback_fn):
    """Verify N+1 Memory Resilience.

    Check that if one single node dies we can still start all the instances it
    was primary for.

    Args:
      node_info: dict of node name -> dict with at least the 'mfree' and
        'sinst-by-pnode' keys (as built by Exec)
      instance_cfg: dict of instance name -> instance configuration object
      feedback_fn: function used to report each problem found

    Returns:
      True if some node cannot absorb a failover, False otherwise.

    """
    bad = False

    for node, nodeinfo in node_info.iteritems():
      # This code checks that every node which is now listed as secondary has
      # enough memory to host all instances it is supposed to should a single
      # other node in the cluster fail.
      # FIXME: not ready for failover to an arbitrary node
      # FIXME: does not support file-backed instances
      # WARNING: we currently take into account down instances as well as up
      # ones, considering that even if they're down someone might want to start
      # them even in the event of a node failure.
      for prinode, instances in nodeinfo['sinst-by-pnode'].iteritems():
        needed_mem = 0
        for instance in instances:
          needed_mem += instance_cfg[instance].memory
        if nodeinfo['mfree'] < needed_mem:
          feedback_fn("  - ERROR: not enough memory on node %s to accomodate"
                      " failovers should node %s fail" % (node, prinode))
          bad = True
    return bad
675 2b3b6ddd Guido Trotter
676 a8083063 Iustin Pop
  def CheckPrereq(self):
    """Check prerequisites.

    Transform the list of checks we're going to skip into a set and check that
    all its members are valid.

    """
    self.skip_set = frozenset(self.op.skip_checks)
    # only checks declared optional may be skipped
    if not constants.VERIFY_OPTIONAL_CHECKS.issuperset(self.skip_set):
      raise errors.OpPrereqError("Invalid checks to be skipped specified")
686 a8083063 Iustin Pop
687 d8fff41c Guido Trotter
  def BuildHooksEnv(self):
    """Build hooks env.

    Cluster-Verify hooks just run in the post phase and their failure makes
    the output be logged in the verify output and the verification to fail.

    """
    all_nodes = self.cfg.GetNodeList()
    # TODO: populate the environment with useful information for verify hooks
    env = {}
    return env, [], all_nodes
698 d8fff41c Guido Trotter
699 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
    """Verify integrity of cluster, performing various test on nodes.

    Returns True if the verification succeeded (no errors found),
    False otherwise.

    """
    bad = False
    feedback_fn("* Verifying global settings")
    for msg in self.cfg.VerifyConfig():
      feedback_fn("  - ERROR: %s" % msg)

    vg_name = self.cfg.GetVGName()
    nodelist = utils.NiceSort(self.cfg.GetNodeList())
    nodeinfo = [self.cfg.GetNodeInfo(nname) for nname in nodelist]
    instancelist = utils.NiceSort(self.cfg.GetInstanceList())
    i_non_redundant = [] # Non redundant instances
    node_volume = {}
    node_instance = {}
    node_info = {}
    instance_cfg = {}

    # FIXME: verify OS list
    # do local checksums
    file_names = list(self.sstore.GetFileList())
    file_names.append(constants.SSL_CERT_FILE)
    file_names.append(constants.CLUSTER_CONF_FILE)
    local_checksums = utils.FingerprintFiles(file_names)

    # gather all node data via RPC up front
    feedback_fn("* Gathering data (%d nodes)" % len(nodelist))
    all_volumeinfo = rpc.call_volume_list(nodelist, vg_name)
    all_instanceinfo = rpc.call_instance_list(nodelist)
    all_vglist = rpc.call_vg_list(nodelist)
    node_verify_param = {
      'filelist': file_names,
      'nodelist': nodelist,
      'hypervisor': None,
      'node-net-test': [(node.name, node.primary_ip, node.secondary_ip)
                        for node in nodeinfo]
      }
    all_nvinfo = rpc.call_node_verify(nodelist, node_verify_param)
    all_rversion = rpc.call_version(nodelist)
    all_ninfo = rpc.call_node_info(nodelist, self.cfg.GetVGName())

    for node in nodelist:
      feedback_fn("* Verifying node %s" % node)
      result = self._VerifyNode(node, file_names, local_checksums,
                                all_vglist[node], all_nvinfo[node],
                                all_rversion[node], feedback_fn)
      bad = bad or result

      # node_volume
      volumeinfo = all_volumeinfo[node]

      if isinstance(volumeinfo, basestring):
        # a string result is an LVM error message from the node
        feedback_fn("  - ERROR: LVM problem on node %s: %s" %
                    (node, volumeinfo[-400:].encode('string_escape')))
        bad = True
        node_volume[node] = {}
      elif not isinstance(volumeinfo, dict):
        feedback_fn("  - ERROR: connection to %s failed" % (node,))
        bad = True
        continue
      else:
        node_volume[node] = volumeinfo

      # node_instance
      nodeinstance = all_instanceinfo[node]
      if type(nodeinstance) != list:
        feedback_fn("  - ERROR: connection to %s failed" % (node,))
        bad = True
        continue

      node_instance[node] = nodeinstance

      # node_info
      nodeinfo = all_ninfo[node]
      if not isinstance(nodeinfo, dict):
        feedback_fn("  - ERROR: connection to %s failed" % (node,))
        bad = True
        continue

      try:
        node_info[node] = {
          "mfree": int(nodeinfo['memory_free']),
          "dfree": int(nodeinfo['vg_free']),
          "pinst": [],
          "sinst": [],
          # dictionary holding all instances this node is secondary for,
          # grouped by their primary node. Each key is a cluster node, and each
          # value is a list of instances which have the key as primary and the
          # current node as secondary.  this is handy to calculate N+1 memory
          # availability if you can only failover from a primary to its
          # secondary.
          "sinst-by-pnode": {},
        }
      except ValueError:
        feedback_fn("  - ERROR: invalid value returned from node %s" % (node,))
        bad = True
        continue

    node_vol_should = {}

    for instance in instancelist:
      feedback_fn("* Verifying instance %s" % instance)
      inst_config = self.cfg.GetInstanceInfo(instance)
      result =  self._VerifyInstance(instance, inst_config, node_volume,
                                     node_instance, feedback_fn)
      bad = bad or result

      inst_config.MapLVsByNode(node_vol_should)

      instance_cfg[instance] = inst_config

      pnode = inst_config.primary_node
      if pnode in node_info:
        node_info[pnode]['pinst'].append(instance)
      else:
        feedback_fn("  - ERROR: instance %s, connection to primary node"
                    " %s failed" % (instance, pnode))
        bad = True

      # If the instance is non-redundant we cannot survive losing its primary
      # node, so we are not N+1 compliant. On the other hand we have no disk
      # templates with more than one secondary so that situation is not well
      # supported either.
      # FIXME: does not support file-backed instances
      if len(inst_config.secondary_nodes) == 0:
        i_non_redundant.append(instance)
      elif len(inst_config.secondary_nodes) > 1:
        feedback_fn("  - WARNING: multiple secondaries for instance %s"
                    % instance)

      for snode in inst_config.secondary_nodes:
        if snode in node_info:
          node_info[snode]['sinst'].append(instance)
          if pnode not in node_info[snode]['sinst-by-pnode']:
            node_info[snode]['sinst-by-pnode'][pnode] = []
          node_info[snode]['sinst-by-pnode'][pnode].append(instance)
        else:
          feedback_fn("  - ERROR: instance %s, connection to secondary node"
                      " %s failed" % (instance, snode))

    feedback_fn("* Verifying orphan volumes")
    result = self._VerifyOrphanVolumes(node_vol_should, node_volume,
                                       feedback_fn)
    bad = bad or result

    feedback_fn("* Verifying remaining instances")
    result = self._VerifyOrphanInstances(instancelist, node_instance,
                                         feedback_fn)
    bad = bad or result

    if constants.VERIFY_NPLUSONE_MEM not in self.skip_set:
      feedback_fn("* Verifying N+1 Memory redundancy")
      result = self._VerifyNPlusOneMemory(node_info, instance_cfg, feedback_fn)
      bad = bad or result

    feedback_fn("* Other Notes")
    if i_non_redundant:
      feedback_fn("  - NOTICE: %d non-redundant instance(s) found."
                  % len(i_non_redundant))

    return not bad
860 a8083063 Iustin Pop
861 d8fff41c Guido Trotter
  def HooksCallBack(self, phase, hooks_results, feedback_fn, lu_result):
    """Analyze the post-hooks' result, handle it, and send some
    nicely-formatted feedback back to the user.

    Args:
      phase: the hooks phase that has just been run
      hooks_results: the results of the multi-node hooks rpc call
      feedback_fn: function to send feedback back to the caller
      lu_result: previous Exec result

    """
    # We only really run POST phase hooks, and are only interested in
    # their results
    if phase == constants.HOOKS_PHASE_POST:
      # Used to change hooks' output to proper indentation
      indent_re = re.compile('^', re.M)
      feedback_fn("* Hooks Results")
      if not hooks_results:
        feedback_fn("  - ERROR: general communication failure")
        lu_result = 1
      else:
        for node_name in hooks_results:
          show_node_header = True
          res = hooks_results[node_name]
          if res is False or not isinstance(res, list):
            feedback_fn("    Communication failure")
            lu_result = 1
            continue
          for script, hkr, output in res:
            if hkr == constants.HKR_FAIL:
              # The node header is only shown once, if there are
              # failing hooks on that node
              if show_node_header:
                feedback_fn("  Node %s:" % node_name)
                show_node_header = False
              feedback_fn("    ERROR: Script %s failed, output:" % script)
              output = indent_re.sub('      ', output)
              feedback_fn("%s" % output)
              lu_result = 1

      # NOTE(review): for phases other than POST the function falls through
      # and implicitly returns None instead of lu_result -- confirm intended
      return lu_result
902 d8fff41c Guido Trotter
903 a8083063 Iustin Pop
904 2c95a8d4 Iustin Pop
class LUVerifyDisks(NoHooksLU):
  """Verifies the cluster disks status.

  This checks the logical volumes of all network-mirrored, running
  instances and reports unreachable nodes, LVM errors, offline volumes
  and volumes that are missing entirely.

  """
  _OP_REQP = []

  def CheckPrereq(self):
    """Check prerequisites.

    This has no prerequisites.

    """
    pass

  def Exec(self, feedback_fn):
    """Verify integrity of cluster disks.

    Returns the tuple (res_nodes, res_nlvm, res_instances, res_missing):
      - res_nodes: list of node names which could not be contacted
      - res_nlvm: dict of node name -> LVM error message
      - res_instances: list of instance names having offline volumes
      - res_missing: dict of instance name -> list of (node, volume)
        pairs for volumes which are missing

    """
    result = res_nodes, res_nlvm, res_instances, res_missing = [], {}, [], {}

    vg_name = self.cfg.GetVGName()
    nodes = utils.NiceSort(self.cfg.GetNodeList())
    instances = [self.cfg.GetInstanceInfo(name)
                 for name in self.cfg.GetInstanceList()]

    # build the expected (node, volume) -> instance mapping, considering
    # only running, network-mirrored instances
    nv_dict = {}
    for inst in instances:
      inst_lvs = {}
      if (inst.status != "up" or
          inst.disk_template not in constants.DTS_NET_MIRROR):
        continue
      inst.MapLVsByNode(inst_lvs)
      # transform { iname: {node: [vol,],},} to {(node, vol): iname}
      for node, vol_list in inst_lvs.iteritems():
        for vol in vol_list:
          nv_dict[(node, vol)] = inst

    if not nv_dict:
      # nothing to check
      return result

    node_lvs = rpc.call_volume_list(nodes, vg_name)

    for node in nodes:
      # node_volume
      lvs = node_lvs[node]

      if isinstance(lvs, basestring):
        # a string result is an LVM-level error message from the node
        logger.Info("error enumerating LVs on node %s: %s" % (node, lvs))
        res_nlvm[node] = lvs
        # without a volume map we cannot check this node's LVs (the
        # original code fell through and called .iteritems() on a string)
        continue
      elif not isinstance(lvs, dict):
        logger.Info("connection to node %s failed or invalid data returned" %
                    (node,))
        res_nodes.append(node)
        continue

      for lv_name, (_, lv_inactive, lv_online) in lvs.iteritems():
        inst = nv_dict.pop((node, lv_name), None)
        if (not lv_online and inst is not None
            and inst.name not in res_instances):
          res_instances.append(inst.name)

    # any leftover items in nv_dict are missing LVs, let's arrange the
    # data better
    for key, inst in nv_dict.iteritems():
      if inst.name not in res_missing:
        res_missing[inst.name] = []
      res_missing[inst.name].append(key)

    return result
974 2c95a8d4 Iustin Pop
975 2c95a8d4 Iustin Pop
976 07bd8a51 Iustin Pop
class LURenameCluster(LogicalUnit):
  """Rename the cluster.

  """
  HPATH = "cluster-rename"
  HTYPE = constants.HTYPE_CLUSTER
  # required opcode parameters
  _OP_REQP = ["name"]
  REQ_WSSTORE = True

  def BuildHooksEnv(self):
    """Build hooks env.

    """
    env = {
      "OP_TARGET": self.sstore.GetClusterName(),
      "NEW_NAME": self.op.name,
      }
    mn = self.sstore.GetMasterNode()
    return env, [mn], [mn]

  def CheckPrereq(self):
    """Verify that the passed name is a valid one.

    """
    hostname = utils.HostInfo(self.op.name)

    new_name = hostname.name
    self.ip = new_ip = hostname.ip
    old_name = self.sstore.GetClusterName()
    old_ip = self.sstore.GetMasterIP()
    # at least one of name/IP must actually change
    if new_name == old_name and new_ip == old_ip:
      raise errors.OpPrereqError("Neither the name nor the IP address of the"
                                 " cluster has changed")
    if new_ip != old_ip:
      # refuse a new IP which already answers on the network
      if utils.TcpPing(new_ip, constants.DEFAULT_NODED_PORT):
        raise errors.OpPrereqError("The given cluster IP address (%s) is"
                                   " reachable on the network. Aborting." %
                                   new_ip)

    self.op.name = new_name

  def Exec(self, feedback_fn):
    """Rename the cluster.

    """
    clustername = self.op.name
    ip = self.ip
    ss = self.sstore

    # shutdown the master IP
    master = ss.GetMasterNode()
    if not rpc.call_node_stop_master(master, False):
      raise errors.OpExecError("Could not disable the master role")

    try:
      # modify the sstore
      ss.SetKey(ss.SS_MASTER_IP, ip)
      ss.SetKey(ss.SS_CLUSTER_NAME, clustername)

      # Distribute updated ss config to all nodes
      myself = self.cfg.GetNodeInfo(master)
      dist_nodes = self.cfg.GetNodeList()
      if myself.name in dist_nodes:
        dist_nodes.remove(myself.name)

      logger.Debug("Copying updated ssconf data to all nodes")
      for keyname in [ss.SS_CLUSTER_NAME, ss.SS_MASTER_IP]:
        fname = ss.KeyToFilename(keyname)
        result = rpc.call_upload_file(dist_nodes, fname)
        for to_node in dist_nodes:
          if not result[to_node]:
            # a failed copy is only logged, not treated as fatal
            logger.Error("copy of file %s to node %s failed" %
                         (fname, to_node))
    finally:
      # always try to restart the master role, even if the rename failed
      if not rpc.call_node_start_master(master, False):
        logger.Error("Could not re-enable the master role on the master,"
                     " please restart manually.")
1053 07bd8a51 Iustin Pop
1054 07bd8a51 Iustin Pop
1055 8084f9f6 Manuel Franceschini
def _RecursiveCheckIfLVMBased(disk):
  """Check if the given disk or any of its children are lvm-based.

  Args:
    disk: ganeti.objects.Disk object

  Returns:
    boolean indicating whether a LD_LV dev_type was found or not

  """
  # Recurse into the children first: any lvm-based descendant is enough.
  for child in (disk.children or []):
    if _RecursiveCheckIfLVMBased(child):
      return True
  # No lvm-based descendant found; the answer depends on this device alone.
  return disk.dev_type == constants.LD_LV
1070 8084f9f6 Manuel Franceschini
1071 8084f9f6 Manuel Franceschini
1072 8084f9f6 Manuel Franceschini
class LUSetClusterParams(LogicalUnit):
  """Change the parameters of the cluster.

  """
  HPATH = "cluster-modify"
  HTYPE = constants.HTYPE_CLUSTER
  _OP_REQP = []

  def BuildHooksEnv(self):
    """Build hooks env.

    The hooks run only on the master node, before and after the change.

    """
    master = self.sstore.GetMasterNode()
    env = {
      "OP_TARGET": self.sstore.GetClusterName(),
      "NEW_VG_NAME": self.op.vg_name,
      }
    return (env, [master], [master])

  def CheckPrereq(self):
    """Check prerequisites.

    This checks whether the given params don't conflict and
    if the given volume group is valid.

    """
    if not self.op.vg_name:
      # disabling lvm storage: no instance may still use lvm-based disks
      for iname in self.cfg.GetInstanceList():
        inst = self.cfg.GetInstanceInfo(iname)
        for disk in inst.disks:
          if _RecursiveCheckIfLVMBased(disk):
            raise errors.OpPrereqError("Cannot disable lvm storage while"
                                       " lvm-based instances exist")
    else:
      # if vg_name not None, checks given volume group on all nodes
      node_list = self.cfg.GetNodeList()
      vglist = rpc.call_vg_list(node_list)
      for node in node_list:
        vgstatus = utils.CheckVolumeGroupSize(vglist[node], self.op.vg_name,
                                              constants.MIN_VG_SIZE)
        if vgstatus:
          # vgstatus is an error message when the check failed
          raise errors.OpPrereqError("Error on node '%s': %s" %
                                     (node, vgstatus))

  def Exec(self, feedback_fn):
    """Change the parameters of the cluster.

    """
    if self.op.vg_name == self.cfg.GetVGName():
      feedback_fn("Cluster LVM configuration already in desired"
                  " state, not changing")
    else:
      self.cfg.SetVGName(self.op.vg_name)
1127 8084f9f6 Manuel Franceschini
1128 8084f9f6 Manuel Franceschini
1129 5bfac263 Iustin Pop
def _WaitForSync(cfgw, instance, proc, oneshot=False, unlock=False):
1130 a8083063 Iustin Pop
  """Sleep and poll for an instance's disk to sync.
1131 a8083063 Iustin Pop

1132 a8083063 Iustin Pop
  """
1133 a8083063 Iustin Pop
  if not instance.disks:
1134 a8083063 Iustin Pop
    return True
1135 a8083063 Iustin Pop
1136 a8083063 Iustin Pop
  if not oneshot:
1137 5bfac263 Iustin Pop
    proc.LogInfo("Waiting for instance %s to sync disks." % instance.name)
1138 a8083063 Iustin Pop
1139 a8083063 Iustin Pop
  node = instance.primary_node
1140 a8083063 Iustin Pop
1141 a8083063 Iustin Pop
  for dev in instance.disks:
1142 a8083063 Iustin Pop
    cfgw.SetDiskID(dev, node)
1143 a8083063 Iustin Pop
1144 a8083063 Iustin Pop
  retries = 0
1145 a8083063 Iustin Pop
  while True:
1146 a8083063 Iustin Pop
    max_time = 0
1147 a8083063 Iustin Pop
    done = True
1148 a8083063 Iustin Pop
    cumul_degraded = False
1149 a8083063 Iustin Pop
    rstats = rpc.call_blockdev_getmirrorstatus(node, instance.disks)
1150 a8083063 Iustin Pop
    if not rstats:
1151 5bfac263 Iustin Pop
      proc.LogWarning("Can't get any data from node %s" % node)
1152 a8083063 Iustin Pop
      retries += 1
1153 a8083063 Iustin Pop
      if retries >= 10:
1154 3ecf6786 Iustin Pop
        raise errors.RemoteError("Can't contact node %s for mirror data,"
1155 3ecf6786 Iustin Pop
                                 " aborting." % node)
1156 a8083063 Iustin Pop
      time.sleep(6)
1157 a8083063 Iustin Pop
      continue
1158 a8083063 Iustin Pop
    retries = 0
1159 a8083063 Iustin Pop
    for i in range(len(rstats)):
1160 a8083063 Iustin Pop
      mstat = rstats[i]
1161 a8083063 Iustin Pop
      if mstat is None:
1162 5bfac263 Iustin Pop
        proc.LogWarning("Can't compute data for node %s/%s" %
1163 a8083063 Iustin Pop
                        (node, instance.disks[i].iv_name))
1164 a8083063 Iustin Pop
        continue
1165 0834c866 Iustin Pop
      # we ignore the ldisk parameter
1166 0834c866 Iustin Pop
      perc_done, est_time, is_degraded, _ = mstat
1167 a8083063 Iustin Pop
      cumul_degraded = cumul_degraded or (is_degraded and perc_done is None)
1168 a8083063 Iustin Pop
      if perc_done is not None:
1169 a8083063 Iustin Pop
        done = False
1170 a8083063 Iustin Pop
        if est_time is not None:
1171 a8083063 Iustin Pop
          rem_time = "%d estimated seconds remaining" % est_time
1172 a8083063 Iustin Pop
          max_time = est_time
1173 a8083063 Iustin Pop
        else:
1174 a8083063 Iustin Pop
          rem_time = "no time estimate"
1175 5bfac263 Iustin Pop
        proc.LogInfo("- device %s: %5.2f%% done, %s" %
1176 5bfac263 Iustin Pop
                     (instance.disks[i].iv_name, perc_done, rem_time))
1177 a8083063 Iustin Pop
    if done or oneshot:
1178 a8083063 Iustin Pop
      break
1179 a8083063 Iustin Pop
1180 d4fa5c23 Iustin Pop
    time.sleep(min(60, max_time))
1181 a8083063 Iustin Pop
1182 a8083063 Iustin Pop
  if done:
1183 5bfac263 Iustin Pop
    proc.LogInfo("Instance %s's disks are in sync." % instance.name)
1184 a8083063 Iustin Pop
  return not cumul_degraded
1185 a8083063 Iustin Pop
1186 a8083063 Iustin Pop
1187 0834c866 Iustin Pop
def _CheckDiskConsistency(cfgw, dev, node, on_primary, ldisk=False):
1188 a8083063 Iustin Pop
  """Check that mirrors are not degraded.
1189 a8083063 Iustin Pop

1190 0834c866 Iustin Pop
  The ldisk parameter, if True, will change the test from the
1191 0834c866 Iustin Pop
  is_degraded attribute (which represents overall non-ok status for
1192 0834c866 Iustin Pop
  the device(s)) to the ldisk (representing the local storage status).
1193 0834c866 Iustin Pop

1194 a8083063 Iustin Pop
  """
1195 a8083063 Iustin Pop
  cfgw.SetDiskID(dev, node)
1196 0834c866 Iustin Pop
  if ldisk:
1197 0834c866 Iustin Pop
    idx = 6
1198 0834c866 Iustin Pop
  else:
1199 0834c866 Iustin Pop
    idx = 5
1200 a8083063 Iustin Pop
1201 a8083063 Iustin Pop
  result = True
1202 a8083063 Iustin Pop
  if on_primary or dev.AssembleOnSecondary():
1203 a8083063 Iustin Pop
    rstats = rpc.call_blockdev_find(node, dev)
1204 a8083063 Iustin Pop
    if not rstats:
1205 aa9d0c32 Guido Trotter
      logger.ToStderr("Node %s: Disk degraded, not found or node down" % node)
1206 a8083063 Iustin Pop
      result = False
1207 a8083063 Iustin Pop
    else:
1208 0834c866 Iustin Pop
      result = result and (not rstats[idx])
1209 a8083063 Iustin Pop
  if dev.children:
1210 a8083063 Iustin Pop
    for child in dev.children:
1211 a8083063 Iustin Pop
      result = result and _CheckDiskConsistency(cfgw, child, node, on_primary)
1212 a8083063 Iustin Pop
1213 a8083063 Iustin Pop
  return result
1214 a8083063 Iustin Pop
1215 a8083063 Iustin Pop
1216 a8083063 Iustin Pop
class LUDiagnoseOS(NoHooksLU):
  """Logical unit for OS diagnose/query.

  """
  _OP_REQP = ["output_fields", "names"]

  def CheckPrereq(self):
    """Check prerequisites.

    This always succeeds, since this is a pure query LU.

    """
    # name-based filtering is not implemented for OS queries
    if self.op.names:
      raise errors.OpPrereqError("Selective OS query not supported")

    self.dynamic_fields = frozenset(["name", "valid", "node_status"])
    _CheckOutputFields(static=[],
                       dynamic=self.dynamic_fields,
                       selected=self.op.output_fields)

  @staticmethod
  def _DiagnoseByOS(node_list, rlist):
    """Remap a per-node result list into a per-os, per-node dictionary.

      Args:
        node_list: a list with the names of all nodes
        rlist: a map with node names as keys and OS objects as values

      Returns:
        map: a map with osnames as keys and as value another map, with
             nodes as keys and list of OS objects as values
             e.g. {"debian-etch": {"node1": [<object>,...],
                                   "node2": [<object>,]}
                  }

    """
    all_os = {}
    for node_name, os_list in rlist.iteritems():
      if not os_list:
        # node returned nothing; skip it
        continue
      for os_obj in os_list:
        if os_obj.name not in all_os:
          # first time we see this OS: pre-seed an empty result list
          # for every node in node_list
          all_os[os_obj.name] = dict([(nname, []) for nname in node_list])
        all_os[os_obj.name][node_name].append(os_obj)
    return all_os

  def Exec(self, feedback_fn):
    """Compute the list of OSes.

    """
    node_list = self.cfg.GetNodeList()
    node_data = rpc.call_os_diagnose(node_list)
    if node_data == False:
      raise errors.OpExecError("Can't gather the list of OSes")
    os_map = self._DiagnoseByOS(node_list, node_data)
    output = []
    for os_name, os_data in os_map.iteritems():
      row = []
      for field in self.op.output_fields:
        if field == "name":
          val = os_name
        elif field == "valid":
          # truthy only when every node reported a usable first entry
          val = utils.all([osl and osl[0] for osl in os_data.values()])
        elif field == "node_status":
          val = {}
          for node_name, nos_list in os_data.iteritems():
            val[node_name] = [(v.status, v.path) for v in nos_list]
        else:
          raise errors.ParameterError(field)
        row.append(val)
      output.append(row)

    return output
1294 a8083063 Iustin Pop
1295 a8083063 Iustin Pop
1296 a8083063 Iustin Pop
class LURemoveNode(LogicalUnit):
  """Logical unit for removing a node.

  """
  HPATH = "node-remove"
  HTYPE = constants.HTYPE_NODE
  _OP_REQP = ["node_name"]

  def BuildHooksEnv(self):
    """Build hooks env.

    This doesn't run on the target node in the pre phase as a failed
    node would then be impossible to remove.

    """
    env = {
      "OP_TARGET": self.op.node_name,
      "NODE_NAME": self.op.node_name,
      }
    all_nodes = self.cfg.GetNodeList()
    all_nodes.remove(self.op.node_name)
    return env, all_nodes, all_nodes

  def CheckPrereq(self):
    """Check prerequisites.

    This checks:
     - the node exists in the configuration
     - it does not have primary or secondary instances
     - it's not the master

    Any errors are signalled by raising errors.OpPrereqError.

    """
    node = self.cfg.GetNodeInfo(self.cfg.ExpandNodeName(self.op.node_name))
    if node is None:
      # fixed: use the call form of raise; the old "raise Class, arg"
      # statement form was inconsistent with the rest of this module
      raise errors.OpPrereqError("Node '%s' is unknown." % self.op.node_name)

    instance_list = self.cfg.GetInstanceList()

    masternode = self.sstore.GetMasterNode()
    if node.name == masternode:
      raise errors.OpPrereqError("Node is the master node,"
                                 " you need to failover first.")

    for instance_name in instance_list:
      instance = self.cfg.GetInstanceInfo(instance_name)
      if node.name == instance.primary_node:
        raise errors.OpPrereqError("Instance %s still running on the node,"
                                   " please remove first." % instance_name)
      if node.name in instance.secondary_nodes:
        raise errors.OpPrereqError("Instance %s has node as a secondary,"
                                   " please remove first." % instance_name)
    # store the canonical (expanded) node name for Exec
    self.op.node_name = node.name
    self.node = node

  def Exec(self, feedback_fn):
    """Removes the node from the cluster.

    """
    node = self.node
    logger.Info("stopping the node daemon and removing configs from node %s" %
                node.name)

    # drop the node from the cluster context first, then tell it to leave
    self.context.RemoveNode(node.name)

    rpc.call_node_leave_cluster(node.name)
1363 c8a0948f Michael Hanselmann
1364 a8083063 Iustin Pop
1365 a8083063 Iustin Pop
class LUQueryNodes(NoHooksLU):
  """Logical unit for querying nodes.

  """
  _OP_REQP = ["output_fields", "names"]
  REQ_BGL = False

  def ExpandNames(self):
    """Compute the output fields and the node locks needed.

    """
    self.dynamic_fields = frozenset([
      "dtotal", "dfree",
      "mtotal", "mnode", "mfree",
      "bootid",
      "ctotal",
      ])

    _CheckOutputFields(static=["name", "pinst_cnt", "sinst_cnt",
                               "pinst_list", "sinst_list",
                               "pip", "sip", "tags"],
                       dynamic=self.dynamic_fields,
                       selected=self.op.output_fields)

    self.needed_locks = {}
    self.share_locks[locking.LEVEL_NODE] = 1
    # TODO: we could lock nodes only if the user asked for dynamic fields. For
    # that we need atomic ways to get info for a group of nodes from the
    # config, though.
    if not self.op.names:
      # an empty names list means "query all nodes"
      self.needed_locks[locking.LEVEL_NODE] = None
    else:
      self.needed_locks[locking.LEVEL_NODE] = \
        _GetWantedNodes(self, self.op.names)

  def CheckPrereq(self):
    """Check prerequisites.

    """
    # This of course is valid only if we locked the nodes
    self.wanted = self.acquired_locks[locking.LEVEL_NODE]

  def Exec(self, feedback_fn):
    """Computes the list of nodes and their attributes.

    """
    nodenames = self.wanted
    nodelist = [self.cfg.GetNodeInfo(name) for name in nodenames]

    # begin data gathering

    if self.dynamic_fields.intersection(self.op.output_fields):
      # at least one dynamic field requested: query the nodes live
      live_data = {}
      node_data = rpc.call_node_info(nodenames, self.cfg.GetVGName())
      for name in nodenames:
        nodeinfo = node_data.get(name, None)
        if nodeinfo:
          live_data[name] = {
            "mtotal": utils.TryConvert(int, nodeinfo['memory_total']),
            "mnode": utils.TryConvert(int, nodeinfo['memory_dom0']),
            "mfree": utils.TryConvert(int, nodeinfo['memory_free']),
            "dtotal": utils.TryConvert(int, nodeinfo['vg_size']),
            "dfree": utils.TryConvert(int, nodeinfo['vg_free']),
            "ctotal": utils.TryConvert(int, nodeinfo['cpu_total']),
            "bootid": nodeinfo['bootid'],
            }
        else:
          live_data[name] = {}
    else:
      # fixed: build a distinct dict per node; dict.fromkeys(nodenames, {})
      # aliases one shared dict instance for every node, a latent bug should
      # any entry ever be modified in place
      live_data = dict([(name, {}) for name in nodenames])

    node_to_primary = dict([(name, set()) for name in nodenames])
    node_to_secondary = dict([(name, set()) for name in nodenames])

    inst_fields = frozenset(("pinst_cnt", "pinst_list",
                             "sinst_cnt", "sinst_list"))
    if inst_fields & frozenset(self.op.output_fields):
      instancelist = self.cfg.GetInstanceList()

      for instance_name in instancelist:
        inst = self.cfg.GetInstanceInfo(instance_name)
        if inst.primary_node in node_to_primary:
          node_to_primary[inst.primary_node].add(inst.name)
        for secnode in inst.secondary_nodes:
          if secnode in node_to_secondary:
            node_to_secondary[secnode].add(inst.name)

    # end data gathering

    output = []
    for node in nodelist:
      node_output = []
      for field in self.op.output_fields:
        if field == "name":
          val = node.name
        elif field == "pinst_list":
          val = list(node_to_primary[node.name])
        elif field == "sinst_list":
          val = list(node_to_secondary[node.name])
        elif field == "pinst_cnt":
          val = len(node_to_primary[node.name])
        elif field == "sinst_cnt":
          val = len(node_to_secondary[node.name])
        elif field == "pip":
          val = node.primary_ip
        elif field == "sip":
          val = node.secondary_ip
        elif field == "tags":
          val = list(node.GetTags())
        elif field in self.dynamic_fields:
          val = live_data[node.name].get(field, None)
        else:
          raise errors.ParameterError(field)
        node_output.append(val)
      output.append(node_output)

    return output
1479 a8083063 Iustin Pop
1480 a8083063 Iustin Pop
1481 dcb93971 Michael Hanselmann
class LUQueryNodeVolumes(NoHooksLU):
  """Logical unit for getting volumes on node(s).

  """
  _OP_REQP = ["nodes", "output_fields"]

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the fields required are valid output fields.

    """
    self.nodes = _GetWantedNodes(self, self.op.nodes)

    _CheckOutputFields(static=["node"],
                       dynamic=["phys", "vg", "name", "size", "instance"],
                       selected=self.op.output_fields)

  def Exec(self, feedback_fn):
    """Computes the list of nodes and their attributes.

    """
    nodenames = self.nodes
    volumes = rpc.call_node_volumes(nodenames)

    instances = [self.cfg.GetInstanceInfo(iname)
                 for iname in self.cfg.GetInstanceList()]
    # per-instance map of node name -> logical volume names
    lv_by_node = dict([(inst, inst.MapLVsByNode()) for inst in instances])

    output = []
    for node in nodenames:
      # skip nodes that answered with nothing (down or no volumes)
      if node not in volumes or not volumes[node]:
        continue

      node_vols = sorted(volumes[node], key=lambda vol: vol['dev'])

      for vol in node_vols:
        row = []
        for field in self.op.output_fields:
          if field == "node":
            val = node
          elif field == "phys":
            val = vol['dev']
          elif field == "vg":
            val = vol['vg']
          elif field == "name":
            val = vol['name']
          elif field == "size":
            val = int(float(vol['size']))
          elif field == "instance":
            # find the instance owning this logical volume, if any
            val = '-'
            for inst in instances:
              node_lvs = lv_by_node[inst].get(node)
              if node_lvs and vol['name'] in node_lvs:
                val = inst.name
                break
          else:
            raise errors.ParameterError(field)
          row.append(str(val))

        output.append(row)

    return output
1549 dcb93971 Michael Hanselmann
1550 dcb93971 Michael Hanselmann
1551 a8083063 Iustin Pop
class LUAddNode(LogicalUnit):
  """Logical unit for adding node to the cluster.

  Resolves the new node's name, validates its IP configuration against
  the cluster (single- vs dual-homed must match the master), then pushes
  ssh keys and cluster files to it and registers it in the configuration.
  Also supports re-adding a known node (self.op.readd), in which case the
  IP configuration must be unchanged.

  """
  HPATH = "node-add"
  HTYPE = constants.HTYPE_NODE
  # NOTE(review): CheckPrereq also reads self.op.readd and optionally
  # self.op.secondary_ip; presumably the opcode supplies defaults for
  # these, since only node_name is required here -- confirm at the opcode.
  _OP_REQP = ["node_name"]

  def BuildHooksEnv(self):
    """Build hooks env.

    This will run on all nodes before, and on all nodes + the new node after.

    """
    env = {
      "OP_TARGET": self.op.node_name,
      "NODE_NAME": self.op.node_name,
      # primary_ip/secondary_ip are filled in by CheckPrereq below
      "NODE_PIP": self.op.primary_ip,
      "NODE_SIP": self.op.secondary_ip,
      }
    nodes_0 = self.cfg.GetNodeList()
    # post-hooks additionally run on the node being added
    nodes_1 = nodes_0 + [self.op.node_name, ]
    return env, nodes_0, nodes_1

  def CheckPrereq(self):
    """Check prerequisites.

    This checks:
     - the new node is not already in the config (unless readd)
     - it is resolvable
     - its parameters (single/dual homed) matches the cluster
     - its IP addresses do not conflict with existing nodes
     - it is reachable on the noded port

    Any errors are signalled by raising errors.OpPrereqError.

    """
    node_name = self.op.node_name
    cfg = self.cfg

    # resolve the name; the canonical name and IP come from DNS
    dns_data = utils.HostInfo(node_name)

    node = dns_data.name
    primary_ip = self.op.primary_ip = dns_data.ip
    # a missing secondary ip means a single-homed node
    secondary_ip = getattr(self.op, "secondary_ip", None)
    if secondary_ip is None:
      secondary_ip = primary_ip
    if not utils.IsValidIP(secondary_ip):
      raise errors.OpPrereqError("Invalid secondary IP given")
    self.op.secondary_ip = secondary_ip

    node_list = cfg.GetNodeList()
    if not self.op.readd and node in node_list:
      raise errors.OpPrereqError("Node %s is already in the configuration" %
                                 node)
    elif self.op.readd and node not in node_list:
      raise errors.OpPrereqError("Node %s is not in the configuration" % node)

    for existing_node_name in node_list:
      existing_node = cfg.GetNodeInfo(existing_node_name)

      if self.op.readd and node == existing_node_name:
        # a readded node must keep exactly the same addresses
        if (existing_node.primary_ip != primary_ip or
            existing_node.secondary_ip != secondary_ip):
          raise errors.OpPrereqError("Readded node doesn't have the same IP"
                                     " address configuration as before")
        continue

      # neither of the new node's addresses may clash with any address
      # already in use by another node
      if (existing_node.primary_ip == primary_ip or
          existing_node.secondary_ip == primary_ip or
          existing_node.primary_ip == secondary_ip or
          existing_node.secondary_ip == secondary_ip):
        raise errors.OpPrereqError("New node ip address(es) conflict with"
                                   " existing node %s" % existing_node.name)

    # check that the type of the node (single versus dual homed) is the
    # same as for the master
    myself = cfg.GetNodeInfo(self.sstore.GetMasterNode())
    master_singlehomed = myself.secondary_ip == myself.primary_ip
    newbie_singlehomed = secondary_ip == primary_ip
    if master_singlehomed != newbie_singlehomed:
      if master_singlehomed:
        raise errors.OpPrereqError("The master has no private ip but the"
                                   " new node has one")
      else:
        raise errors.OpPrereqError("The master has a private ip but the"
                                   " new node doesn't have one")

    # checks reachability
    if not utils.TcpPing(primary_ip, constants.DEFAULT_NODED_PORT):
      raise errors.OpPrereqError("Node not reachable by ping")

    if not newbie_singlehomed:
      # check reachability from my secondary ip to newbie's secondary ip
      if not utils.TcpPing(secondary_ip, constants.DEFAULT_NODED_PORT,
                           source=myself.secondary_ip):
        raise errors.OpPrereqError("Node secondary ip not reachable by TCP"
                                   " based ping to noded port")

    # the node object is only added to the configuration in Exec
    self.new_node = objects.Node(name=node,
                                 primary_ip=primary_ip,
                                 secondary_ip=secondary_ip)

  def Exec(self, feedback_fn):
    """Adds the new node to the cluster.

    """
    new_node = self.new_node
    node = new_node.name

    # check connectivity
    result = rpc.call_version([node])[node]
    if result:
      if constants.PROTOCOL_VERSION == result:
        logger.Info("communication to node %s fine, sw version %s match" %
                    (node, result))
      else:
        raise errors.OpExecError("Version mismatch master version %s,"
                                 " node version %s" %
                                 (constants.PROTOCOL_VERSION, result))
    else:
      raise errors.OpExecError("Cannot get version from the new node")

    # setup ssh on node
    logger.Info("copy ssh key to node %s" % node)
    priv_key, pub_key, _ = ssh.GetUserFiles(constants.GANETI_RUNAS)
    keyarray = []
    # host keys (dsa+rsa) plus the cluster user's key pair, in the order
    # expected by the node_add rpc below
    keyfiles = [constants.SSH_HOST_DSA_PRIV, constants.SSH_HOST_DSA_PUB,
                constants.SSH_HOST_RSA_PRIV, constants.SSH_HOST_RSA_PUB,
                priv_key, pub_key]

    for i in keyfiles:
      f = open(i, 'r')
      try:
        keyarray.append(f.read())
      finally:
        f.close()

    result = rpc.call_node_add(node, keyarray[0], keyarray[1], keyarray[2],
                               keyarray[3], keyarray[4], keyarray[5])

    if not result:
      raise errors.OpExecError("Cannot transfer ssh keys to the new node")

    # Add node to our /etc/hosts, and add key to known_hosts
    utils.AddHostToEtcHosts(new_node.name)

    if new_node.secondary_ip != new_node.primary_ip:
      # ask the new node itself to confirm it owns the secondary ip
      if not rpc.call_node_tcp_ping(new_node.name,
                                    constants.LOCALHOST_IP_ADDRESS,
                                    new_node.secondary_ip,
                                    constants.DEFAULT_NODED_PORT,
                                    10, False):
        raise errors.OpExecError("Node claims it doesn't have the secondary ip"
                                 " you gave (%s). Please fix and re-run this"
                                 " command." % new_node.secondary_ip)

    # verify ssh/hostname setup from the master towards the new node
    node_verify_list = [self.sstore.GetMasterNode()]
    node_verify_param = {
      'nodelist': [node],
      # TODO: do a node-net-test as well?
    }

    result = rpc.call_node_verify(node_verify_list, node_verify_param)
    for verifier in node_verify_list:
      if not result[verifier]:
        raise errors.OpExecError("Cannot communicate with %s's node daemon"
                                 " for remote verification" % verifier)
      if result[verifier]['nodelist']:
        for failed in result[verifier]['nodelist']:
          feedback_fn("ssh/hostname verification failed %s -> %s" %
                      (verifier, result[verifier]['nodelist'][failed]))
        raise errors.OpExecError("ssh/hostname verification failed.")

    # Distribute updated /etc/hosts and known_hosts to all nodes,
    # including the node just added
    myself = self.cfg.GetNodeInfo(self.sstore.GetMasterNode())
    dist_nodes = self.cfg.GetNodeList()
    if not self.op.readd:
      # on readd the node is already in the node list
      dist_nodes.append(node)
    if myself.name in dist_nodes:
      # the master already has the up-to-date files
      dist_nodes.remove(myself.name)

    logger.Debug("Copying hosts and known_hosts to all nodes")
    for fname in (constants.ETC_HOSTS, constants.SSH_KNOWN_HOSTS_FILE):
      result = rpc.call_upload_file(dist_nodes, fname)
      for to_node in dist_nodes:
        if not result[to_node]:
          # distribution failures are logged but not fatal
          logger.Error("copy of file %s to node %s failed" %
                       (fname, to_node))

    # copy the ssconf files (and the VNC password for HVM) to the new node
    to_copy = self.sstore.GetFileList()
    if self.sstore.GetHypervisorType() == constants.HT_XEN_HVM31:
      to_copy.append(constants.VNC_PASSWORD_FILE)
    for fname in to_copy:
      result = rpc.call_upload_file([node], fname)
      if not result[node]:
        logger.Error("could not copy file %s to node %s" % (fname, node))

    # finally make the node known to the rest of the master daemon
    if self.op.readd:
      self.context.ReaddNode(new_node)
    else:
      self.context.AddNode(new_node)
1752 a8083063 Iustin Pop
1753 a8083063 Iustin Pop
1754 a8083063 Iustin Pop
class LUQueryClusterInfo(NoHooksLU):
  """Query cluster configuration.

  A pure read-only query: it needs no locks and does not require the
  master to be up.

  """
  _OP_REQP = []
  REQ_MASTER = False
  REQ_BGL = False

  def ExpandNames(self):
    # nothing to expand, nothing to lock
    self.needed_locks = {}

  def CheckPrereq(self):
    """No prerequisites needed for this LU.

    """
    pass

  def Exec(self, feedback_fn):
    """Assemble and return the cluster configuration summary.

    """
    sstore = self.sstore
    result = {}
    result["name"] = sstore.GetClusterName()
    result["software_version"] = constants.RELEASE_VERSION
    result["protocol_version"] = constants.PROTOCOL_VERSION
    result["config_version"] = constants.CONFIG_VERSION
    result["os_api_version"] = constants.OS_API_VERSION
    result["export_version"] = constants.EXPORT_VERSION
    result["master"] = sstore.GetMasterNode()
    result["architecture"] = (platform.architecture()[0], platform.machine())
    result["hypervisor_type"] = sstore.GetHypervisorType()
    return result
1788 a8083063 Iustin Pop
1789 a8083063 Iustin Pop
1790 a8083063 Iustin Pop
class LUDumpClusterConfig(NoHooksLU):
  """Return a text-representation of the cluster-config.

  """
  _OP_REQP = []
  REQ_BGL = False

  def ExpandNames(self):
    # a config dump needs no locks at all
    self.needed_locks = {}

  def CheckPrereq(self):
    """No prerequisites.

    """
    pass

  def Exec(self, feedback_fn):
    """Dump a representation of the cluster config to the standard output.

    """
    dump = self.cfg.DumpConfig()
    return dump
1811 a8083063 Iustin Pop
1812 a8083063 Iustin Pop
1813 a8083063 Iustin Pop
class LUActivateInstanceDisks(NoHooksLU):
  """Bring up an instance's disks.

  """
  _OP_REQP = ["instance_name"]

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster.

    """
    full_name = self.cfg.ExpandInstanceName(self.op.instance_name)
    instance = self.cfg.GetInstanceInfo(full_name)
    if instance is None:
      raise errors.OpPrereqError("Instance '%s' not known" %
                                 self.op.instance_name)
    self.instance = instance

  def Exec(self, feedback_fn):
    """Activate the disks.

    """
    disks_ok, disks_info = _AssembleInstanceDisks(self.instance, self.cfg)
    if disks_ok:
      return disks_info
    raise errors.OpExecError("Cannot activate block devices")
1842 a8083063 Iustin Pop
1843 a8083063 Iustin Pop
1844 a8083063 Iustin Pop
def _AssembleInstanceDisks(instance, cfg, ignore_secondaries=False):
  """Prepare the block devices for an instance.

  This sets up the block devices on all nodes.

  Args:
    instance: a ganeti.objects.Instance object
    cfg: the cluster ConfigWriter, used to set per-node disk IDs
    ignore_secondaries: if true, errors on secondary nodes won't result
                        in an error return from the function

  Returns:
    a tuple (disks_ok, device_info) where:
      disks_ok: boolean, False if any non-ignored assembly failed
      device_info: list of (primary_node, instance_visible_name, result)
                   tuples, one per disk, with the mapping from node
                   devices to instance devices on the primary node

  """
  device_info = []
  disks_ok = True
  iname = instance.name
  # With the two passes mechanism we try to reduce the window of
  # opportunity for the race condition of switching DRBD to primary
  # before handshaking occured, but we do not eliminate it

  # The proper fix would be to wait (with some limits) until the
  # connection has been made and drbd transitions from WFConnection
  # into any other network-connected state (Connected, SyncTarget,
  # SyncSource, etc.)

  # 1st pass, assemble on all nodes in secondary mode
  for inst_disk in instance.disks:
    for node, node_disk in inst_disk.ComputeNodeTree(instance.primary_node):
      cfg.SetDiskID(node_disk, node)
      # is_primary=False for every node on this pass
      result = rpc.call_blockdev_assemble(node, node_disk, iname, False)
      if not result:
        logger.Error("could not prepare block device %s on node %s"
                     " (is_primary=False, pass=1)" % (inst_disk.iv_name, node))
        if not ignore_secondaries:
          disks_ok = False

  # FIXME: race condition on drbd migration to primary

  # 2nd pass, do only the primary node
  for inst_disk in instance.disks:
    for node, node_disk in inst_disk.ComputeNodeTree(instance.primary_node):
      if node != instance.primary_node:
        continue
      cfg.SetDiskID(node_disk, node)
      # re-assemble on the primary with is_primary=True
      result = rpc.call_blockdev_assemble(node, node_disk, iname, True)
      if not result:
        logger.Error("could not prepare block device %s on node %s"
                     " (is_primary=True, pass=2)" % (inst_disk.iv_name, node))
        disks_ok = False
    # NOTE: 'result' here is the primary-node value from the loop above
    device_info.append((instance.primary_node, inst_disk.iv_name, result))

  # leave the disks configured for the primary node
  # this is a workaround that would be fixed better by
  # improving the logical/physical id handling
  for disk in instance.disks:
    cfg.SetDiskID(disk, instance.primary_node)

  return disks_ok, device_info
1904 a8083063 Iustin Pop
1905 a8083063 Iustin Pop
1906 fe7b0351 Michael Hanselmann
def _StartInstanceDisks(cfg, instance, force):
  """Start the disks of an instance, aborting on failure.

  On assembly failure the already-assembled disks are shut down again
  and errors.OpExecError is raised.

  """
  assembled, _ = _AssembleInstanceDisks(instance, cfg,
                                        ignore_secondaries=force)
  if assembled:
    return
  # roll back whatever was brought up before bailing out
  _ShutdownInstanceDisks(instance, cfg)
  if force is not None and not force:
    logger.Error("If the message above refers to a secondary node,"
                 " you can retry the operation using '--force'.")
  raise errors.OpExecError("Disk consistency error")
1918 fe7b0351 Michael Hanselmann
1919 fe7b0351 Michael Hanselmann
1920 a8083063 Iustin Pop
class LUDeactivateInstanceDisks(NoHooksLU):
  """Shutdown an instance's disks.

  """
  _OP_REQP = ["instance_name"]

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster.

    """
    expanded = self.cfg.ExpandInstanceName(self.op.instance_name)
    instance = self.cfg.GetInstanceInfo(expanded)
    if instance is None:
      raise errors.OpPrereqError("Instance '%s' not known" %
                                 self.op.instance_name)
    self.instance = instance

  def Exec(self, feedback_fn):
    """Deactivate the disks

    """
    instance = self.instance
    pnode = instance.primary_node
    # refuse to touch the disks while the instance is running
    running = rpc.call_instance_list([pnode])[pnode]
    if type(running) is not list:
      raise errors.OpExecError("Can't contact node '%s'" %
                               pnode)

    if instance.name in running:
      raise errors.OpExecError("Instance is running, can't shutdown"
                               " block devices.")

    _ShutdownInstanceDisks(instance, self.cfg)
1955 a8083063 Iustin Pop
1956 a8083063 Iustin Pop
1957 a8083063 Iustin Pop
def _ShutdownInstanceDisks(instance, cfg, ignore_primary=False):
  """Shutdown block devices of an instance.

  This does the shutdown on all nodes of the instance.

  If ignore_primary is true, errors on the primary node are ignored
  (they do not make the result False); errors on any other node always
  do.

  Returns True if every non-ignored shutdown succeeded, False
  otherwise.

  """
  result = True
  for disk in instance.disks:
    for node, top_disk in disk.ComputeNodeTree(instance.primary_node):
      # point the device at the right physical id for this node
      cfg.SetDiskID(top_disk, node)
      if not rpc.call_blockdev_shutdown(node, top_disk):
        logger.Error("could not shutdown block device %s on node %s" %
                     (disk.iv_name, node))
        # only primary-node failures can be ignored, and only on request
        if not ignore_primary or node != instance.primary_node:
          result = False
  return result
1976 a8083063 Iustin Pop
1977 a8083063 Iustin Pop
1978 d4f16fd9 Iustin Pop
def _CheckNodeFreeMemory(cfg, node, reason, requested):
  """Check that a node has enough free memory for a request.

  Queries the node for its free memory and raises
  errors.OpPrereqError if the node cannot be contacted, returns
  unusable data, or has less than the requested amount of memory free.

  Args:
    - cfg: a ConfigWriter instance
    - node: the node name
    - reason: string to use in the error message
    - requested: the amount of memory in MiB

  """
  nodeinfo = rpc.call_node_info([node], cfg.GetVGName())
  if not (nodeinfo and isinstance(nodeinfo, dict)):
    raise errors.OpPrereqError("Could not contact node %s for resource"
                             " information" % (node,))

  free_mem = nodeinfo[node].get('memory_free')
  if not isinstance(free_mem, int):
    raise errors.OpPrereqError("Can't compute free memory on node %s, result"
                             " was '%s'" % (node, free_mem))
  if free_mem < requested:
    raise errors.OpPrereqError("Not enough memory on node %s for %s:"
                             " needed %s MiB, available %s MiB" %
                             (node, reason, requested, free_mem))
2006 d4f16fd9 Iustin Pop
2007 d4f16fd9 Iustin Pop
2008 a8083063 Iustin Pop
class LUStartupInstance(LogicalUnit):
  """Starts an instance.

  Locks the instance and its nodes, checks that the primary node has
  enough free memory and the needed bridges, then assembles the disks
  and asks the primary node to start the instance.

  """
  HPATH = "instance-start"
  HTYPE = constants.HTYPE_INSTANCE
  _OP_REQP = ["instance_name", "force"]
  REQ_BGL = False

  def ExpandNames(self):
    # lock the instance now; its node locks are computed lazily in
    # DeclareLocks once the instance lock is held
    self._ExpandAndLockInstance()
    self.needed_locks[locking.LEVEL_NODE] = []
    self.recalculate_locks[locking.LEVEL_NODE] = 'replace'

  def DeclareLocks(self, level):
    if level == locking.LEVEL_NODE:
      # replace the empty node-lock list with the instance's actual nodes
      self._LockInstancesNodes()

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and secondary nodes of the instance.

    """
    env = {
      "FORCE": self.op.force,
      }
    env.update(_BuildInstanceHookEnvByObject(self.instance))
    nl = ([self.sstore.GetMasterNode(), self.instance.primary_node] +
          list(self.instance.secondary_nodes))
    return env, nl, nl

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster.

    """
    self.instance = instance = self.cfg.GetInstanceInfo(self.op.instance_name)
    # the instance lock taken in ExpandNames guarantees it still exists
    assert self.instance is not None, \
      "Cannot retrieve locked instance %s" % self.op.instance_name

    # check bridges existance
    _CheckInstanceBridgesExist(instance)

    # make sure the primary can hold the instance's memory
    _CheckNodeFreeMemory(self.cfg, instance.primary_node,
                         "starting instance %s" % instance.name,
                         instance.memory)

  def Exec(self, feedback_fn):
    """Start the instance.

    """
    instance = self.instance
    force = self.op.force
    # extra_args is optional on the opcode
    extra_args = getattr(self.op, "extra_args", "")

    # mark the instance up in the config before actually starting it
    self.cfg.MarkInstanceUp(instance.name)

    node_current = instance.primary_node

    # raises OpExecError (after disk rollback) if assembly fails
    _StartInstanceDisks(self.cfg, instance, force)

    if not rpc.call_instance_start(node_current, instance, extra_args):
      # roll back the disk assembly on start failure
      _ShutdownInstanceDisks(instance, self.cfg)
      raise errors.OpExecError("Could not start instance")
2074 a8083063 Iustin Pop
2075 a8083063 Iustin Pop
2076 bf6929a2 Alexander Schreiber
class LURebootInstance(LogicalUnit):
  """Reboot an instance.

  Depending on the requested reboot type this is either delegated to
  the hypervisor (soft/hard) or done as a full stop/start cycle.

  """
  HPATH = "instance-reboot"
  HTYPE = constants.HTYPE_INSTANCE
  _OP_REQP = ["instance_name", "ignore_secondaries", "reboot_type"]
  REQ_BGL = False

  def ExpandNames(self):
    valid_types = (constants.INSTANCE_REBOOT_SOFT,
                   constants.INSTANCE_REBOOT_HARD,
                   constants.INSTANCE_REBOOT_FULL)
    if self.op.reboot_type not in valid_types:
      raise errors.ParameterError("reboot type not in [%s, %s, %s]" %
                                  valid_types)
    self._ExpandAndLockInstance()
    self.needed_locks[locking.LEVEL_NODE] = []
    self.recalculate_locks[locking.LEVEL_NODE] = 'replace'

  def DeclareLocks(self, level):
    if level == locking.LEVEL_NODE:
      # FIXME: lock only primary on (not constants.INSTANCE_REBOOT_FULL)
      self._LockInstancesNodes()

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and secondary nodes of the instance.

    """
    env = {"IGNORE_SECONDARIES": self.op.ignore_secondaries}
    env.update(_BuildInstanceHookEnvByObject(self.instance))
    nl = [self.sstore.GetMasterNode(), self.instance.primary_node]
    nl.extend(self.instance.secondary_nodes)
    return env, nl, nl

  def CheckPrereq(self):
    """Check prerequisites.

    Verifies that the instance exists and that its network bridges are
    present on the primary node.

    """
    inst = self.cfg.GetInstanceInfo(self.op.instance_name)
    self.instance = inst
    assert inst is not None, \
      "Cannot retrieve locked instance %s" % self.op.instance_name

    # the instance's bridges must exist for it to come back up
    _CheckInstanceBridgesExist(inst)

  def Exec(self, feedback_fn):
    """Reboot the instance.

    """
    inst = self.instance
    extra_args = getattr(self.op, "extra_args", "")
    pnode = inst.primary_node

    if self.op.reboot_type in (constants.INSTANCE_REBOOT_SOFT,
                               constants.INSTANCE_REBOOT_HARD):
      # soft/hard reboots are handled by the hypervisor itself
      if not rpc.call_instance_reboot(pnode, inst,
                                      self.op.reboot_type, extra_args):
        raise errors.OpExecError("Could not reboot instance")
    else:
      # full reboot: stop the instance (and its disks) and start it again
      if not rpc.call_instance_shutdown(pnode, inst):
        raise errors.OpExecError("could not shutdown instance for full reboot")
      _ShutdownInstanceDisks(inst, self.cfg)
      _StartInstanceDisks(self.cfg, inst, self.op.ignore_secondaries)
      if not rpc.call_instance_start(pnode, inst, extra_args):
        _ShutdownInstanceDisks(inst, self.cfg)
        raise errors.OpExecError("Could not start instance for full reboot")

    self.cfg.MarkInstanceUp(inst.name)
2155 bf6929a2 Alexander Schreiber
2156 bf6929a2 Alexander Schreiber
2157 a8083063 Iustin Pop
class LUShutdownInstance(LogicalUnit):
  """Shutdown an instance.

  """
  HPATH = "instance-stop"
  HTYPE = constants.HTYPE_INSTANCE
  _OP_REQP = ["instance_name"]
  REQ_BGL = False

  def ExpandNames(self):
    self._ExpandAndLockInstance()
    self.needed_locks[locking.LEVEL_NODE] = []
    self.recalculate_locks[locking.LEVEL_NODE] = 'replace'

  def DeclareLocks(self, level):
    if level == locking.LEVEL_NODE:
      self._LockInstancesNodes()

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and secondary nodes of the instance.

    """
    env = _BuildInstanceHookEnvByObject(self.instance)
    nl = [self.sstore.GetMasterNode(), self.instance.primary_node]
    nl.extend(self.instance.secondary_nodes)
    return env, nl, nl

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster.

    """
    self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
    assert self.instance is not None, \
      "Cannot retrieve locked instance %s" % self.op.instance_name

  def Exec(self, feedback_fn):
    """Shutdown the instance.

    The instance is marked down in the configuration first; a failed
    shutdown RPC is only logged, and the disks are deactivated in any
    case.

    """
    inst = self.instance
    self.cfg.MarkInstanceDown(inst.name)
    if not rpc.call_instance_shutdown(inst.primary_node, inst):
      logger.Error("could not shutdown instance")

    _ShutdownInstanceDisks(inst, self.cfg)
2207 a8083063 Iustin Pop
2208 a8083063 Iustin Pop
2209 fe7b0351 Michael Hanselmann
class LUReinstallInstance(LogicalUnit):
  """Reinstall an instance.

  Re-runs the OS create scripts on the (stopped) instance, optionally
  switching it to a different OS first.

  """
  HPATH = "instance-reinstall"
  HTYPE = constants.HTYPE_INSTANCE
  _OP_REQP = ["instance_name"]
  REQ_BGL = False

  def ExpandNames(self):
    self._ExpandAndLockInstance()
    self.needed_locks[locking.LEVEL_NODE] = []
    self.recalculate_locks[locking.LEVEL_NODE] = 'replace'

  def DeclareLocks(self, level):
    if level == locking.LEVEL_NODE:
      self._LockInstancesNodes()

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and secondary nodes of the instance.

    """
    env = _BuildInstanceHookEnvByObject(self.instance)
    nl = ([self.sstore.GetMasterNode(), self.instance.primary_node] +
          list(self.instance.secondary_nodes))
    return env, nl, nl

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster and is not running,
    and, if an OS change was requested, that the target OS is supported
    on the primary node.

    """
    instance = self.cfg.GetInstanceInfo(self.op.instance_name)
    assert instance is not None, \
      "Cannot retrieve locked instance %s" % self.op.instance_name

    if instance.disk_template == constants.DT_DISKLESS:
      raise errors.OpPrereqError("Instance '%s' has no disks" %
                                 self.op.instance_name)
    if instance.status != "down":
      raise errors.OpPrereqError("Instance '%s' is marked to be up" %
                                 self.op.instance_name)
    # the configuration may be stale, so double-check with the node
    remote_info = rpc.call_instance_info(instance.primary_node, instance.name)
    if remote_info:
      raise errors.OpPrereqError("Instance '%s' is running on the node %s" %
                                 (self.op.instance_name,
                                  instance.primary_node))

    self.op.os_type = getattr(self.op, "os_type", None)
    if self.op.os_type is not None:
      # OS verification
      pnode = self.cfg.GetNodeInfo(
        self.cfg.ExpandNodeName(instance.primary_node))
      if pnode is None:
        # FIX: this previously interpolated self.op.pnode, which does not
        # exist on a reinstall opcode (_OP_REQP above) and would have
        # raised AttributeError instead of the intended OpPrereqError
        raise errors.OpPrereqError("Primary node '%s' is unknown" %
                                   instance.primary_node)
      os_obj = rpc.call_os_get(pnode.name, self.op.os_type)
      if not os_obj:
        raise errors.OpPrereqError("OS '%s' not in supported OS list for"
                                   " primary node"  % self.op.os_type)

    self.instance = instance

  def Exec(self, feedback_fn):
    """Reinstall the instance.

    """
    inst = self.instance

    if self.op.os_type is not None:
      # record the new OS in the configuration before reinstalling
      feedback_fn("Changing OS to '%s'..." % self.op.os_type)
      inst.os = self.op.os_type
      self.cfg.AddInstance(inst)

    _StartInstanceDisks(self.cfg, inst, None)
    try:
      feedback_fn("Running the instance OS create scripts...")
      if not rpc.call_instance_os_add(inst.primary_node, inst, "sda", "sdb"):
        raise errors.OpExecError("Could not install OS for instance %s"
                                 " on node %s" %
                                 (inst.name, inst.primary_node))
    finally:
      # always deactivate the disks, even if the install failed
      _ShutdownInstanceDisks(inst, self.cfg)
2295 fe7b0351 Michael Hanselmann
2296 fe7b0351 Michael Hanselmann
2297 decd5f45 Iustin Pop
class LURenameInstance(LogicalUnit):
  """Rename an instance.

  """
  HPATH = "instance-rename"
  HTYPE = constants.HTYPE_INSTANCE
  _OP_REQP = ["instance_name", "new_name"]

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and secondary nodes of the instance.

    """
    env = _BuildInstanceHookEnvByObject(self.instance)
    env["INSTANCE_NEW_NAME"] = self.op.new_name
    nl = ([self.sstore.GetMasterNode(), self.instance.primary_node] +
          list(self.instance.secondary_nodes))
    return env, nl, nl

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster and is not running,
    and that the new name resolves, is not already used by another
    instance and (unless ignore_ip is set) its IP is not yet reachable.

    """
    instance = self.cfg.GetInstanceInfo(
      self.cfg.ExpandInstanceName(self.op.instance_name))
    if instance is None:
      raise errors.OpPrereqError("Instance '%s' not known" %
                                 self.op.instance_name)
    if instance.status != "down":
      raise errors.OpPrereqError("Instance '%s' is marked to be up" %
                                 self.op.instance_name)
    # the configuration may be stale, so double-check with the node
    remote_info = rpc.call_instance_info(instance.primary_node, instance.name)
    if remote_info:
      raise errors.OpPrereqError("Instance '%s' is running on the node %s" %
                                 (self.op.instance_name,
                                  instance.primary_node))
    self.instance = instance

    # new name verification
    name_info = utils.HostInfo(self.op.new_name)

    self.op.new_name = new_name = name_info.name
    instance_list = self.cfg.GetInstanceList()
    if new_name in instance_list:
      raise errors.OpPrereqError("Instance '%s' is already in the cluster" %
                                 new_name)

    if not getattr(self.op, "ignore_ip", False):
      if utils.TcpPing(name_info.ip, constants.DEFAULT_NODED_PORT):
        raise errors.OpPrereqError("IP %s of instance %s already in use" %
                                   (name_info.ip, new_name))

  def Exec(self, feedback_fn):
    """Rename the instance.

    Renames the instance in the configuration (and its lock), moves the
    file storage directory for file-based instances, and runs the OS
    rename script on the primary node.

    """
    # FIX: docstring previously said "Reinstall the instance." — a
    # copy-paste error from LUReinstallInstance
    inst = self.instance
    old_name = inst.name

    if inst.disk_template == constants.DT_FILE:
      old_file_storage_dir = os.path.dirname(inst.disks[0].logical_id[1])

    self.cfg.RenameInstance(inst.name, self.op.new_name)
    # Change the instance lock. This is definitely safe while we hold the BGL
    self.context.glm.remove(locking.LEVEL_INSTANCE, inst.name)
    self.context.glm.add(locking.LEVEL_INSTANCE, self.op.new_name)

    # re-read the instance from the configuration after rename
    inst = self.cfg.GetInstanceInfo(self.op.new_name)

    if inst.disk_template == constants.DT_FILE:
      new_file_storage_dir = os.path.dirname(inst.disks[0].logical_id[1])
      result = rpc.call_file_storage_dir_rename(inst.primary_node,
                                                old_file_storage_dir,
                                                new_file_storage_dir)

      if not result:
        raise errors.OpExecError("Could not connect to node '%s' to rename"
                                 " directory '%s' to '%s' (but the instance"
                                 " has been renamed in Ganeti)" % (
                                 inst.primary_node, old_file_storage_dir,
                                 new_file_storage_dir))

      if not result[0]:
        raise errors.OpExecError("Could not rename directory '%s' to '%s'"
                                 " (but the instance has been renamed in"
                                 " Ganeti)" % (old_file_storage_dir,
                                               new_file_storage_dir))

    _StartInstanceDisks(self.cfg, inst, None)
    try:
      if not rpc.call_instance_run_rename(inst.primary_node, inst, old_name,
                                          "sda", "sdb"):
        # a failed rename script is logged, not fatal: the rename itself
        # has already been committed to the configuration
        msg = ("Could not run OS rename script for instance %s on node %s"
               " (but the instance has been renamed in Ganeti)" %
               (inst.name, inst.primary_node))
        logger.Error(msg)
    finally:
      _ShutdownInstanceDisks(inst, self.cfg)
2400 decd5f45 Iustin Pop
2401 decd5f45 Iustin Pop
2402 a8083063 Iustin Pop
class LURemoveInstance(LogicalUnit):
  """Remove an instance.

  """
  HPATH = "instance-remove"
  HTYPE = constants.HTYPE_INSTANCE
  _OP_REQP = ["instance_name", "ignore_failures"]

  def BuildHooksEnv(self):
    """Build hooks env.

    The hooks node list contains only the master node.

    """
    env = _BuildInstanceHookEnvByObject(self.instance)
    nl = [self.sstore.GetMasterNode()]
    return env, nl, nl

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster.

    """
    full_name = self.cfg.ExpandInstanceName(self.op.instance_name)
    instance = self.cfg.GetInstanceInfo(full_name)
    if instance is None:
      raise errors.OpPrereqError("Instance '%s' not known" %
                                 self.op.instance_name)
    self.instance = instance

  def Exec(self, feedback_fn):
    """Remove the instance.

    Shuts the instance down, removes its disks and finally drops it
    from the configuration and the lock manager.  With ignore_failures
    set, shutdown/disk-removal errors only produce warnings.

    """
    instance = self.instance
    logger.Info("shutting down instance %s on node %s" %
                (instance.name, instance.primary_node))

    if not rpc.call_instance_shutdown(instance.primary_node, instance):
      if not self.op.ignore_failures:
        raise errors.OpExecError("Could not shutdown instance %s on node %s" %
                                 (instance.name, instance.primary_node))
      feedback_fn("Warning: can't shutdown instance")

    logger.Info("removing block devices for instance %s" % instance.name)

    if not _RemoveDisks(instance, self.cfg):
      if not self.op.ignore_failures:
        raise errors.OpExecError("Can't remove instance's disks")
      feedback_fn("Warning: can't remove instance's disks")

    logger.Info("removing instance %s out of cluster config" % instance.name)

    self.cfg.RemoveInstance(instance.name)
    # drop the instance's lock now that it is gone from the configuration
    self.context.glm.remove(locking.LEVEL_INSTANCE, instance.name)
2461 a8083063 Iustin Pop
2462 a8083063 Iustin Pop
2463 a8083063 Iustin Pop
class LUQueryInstances(NoHooksLU):
  """Logical unit for querying instances.

  """
  _OP_REQP = ["output_fields", "names"]
  REQ_BGL = False

  def ExpandNames(self):
    # fields that require querying the nodes at runtime
    self.dynamic_fields = frozenset(["oper_state", "oper_ram", "status"])
    _CheckOutputFields(static=["name", "os", "pnode", "snodes",
                               "admin_state", "admin_ram",
                               "disk_template", "ip", "mac", "bridge",
                               "sda_size", "sdb_size", "vcpus", "tags",
                               "auto_balance",
                               "network_port", "kernel_path", "initrd_path",
                               "hvm_boot_order", "hvm_acpi", "hvm_pae",
                               "hvm_cdrom_image_path", "hvm_nic_type",
                               "hvm_disk_type", "vnc_bind_address"],
                       dynamic=self.dynamic_fields,
                       selected=self.op.output_fields)

    self.needed_locks = {}
    self.share_locks[locking.LEVEL_INSTANCE] = 1
    self.share_locks[locking.LEVEL_NODE] = 1

    # TODO: we could lock instances (and nodes) only if the user asked for
    # dynamic fields. For that we need atomic ways to get info for a group of
    # instances from the config, though.
    if self.op.names:
      self.needed_locks[locking.LEVEL_INSTANCE] = \
        _GetWantedInstances(self, self.op.names)
    else:
      self.needed_locks[locking.LEVEL_INSTANCE] = None # Acquire all

    self.needed_locks[locking.LEVEL_NODE] = []
    self.recalculate_locks[locking.LEVEL_NODE] = 'replace'

  def DeclareLocks(self, level):
    # TODO: locking of nodes could be avoided when not querying them
    if level == locking.LEVEL_NODE:
      self._LockInstancesNodes()

  def CheckPrereq(self):
    """Check prerequisites.

    """
    # This of course is valid only if we locked the instances
    self.wanted = self.acquired_locks[locking.LEVEL_INSTANCE]

  def Exec(self, feedback_fn):
    """Computes the list of nodes and their attributes.

    """
    instance_names = self.wanted
    instance_list = [self.cfg.GetInstanceInfo(iname)
                     for iname in instance_names]

    # begin data gathering

    nodes = frozenset([inst.primary_node for inst in instance_list])

    bad_nodes = []
    if self.dynamic_fields.intersection(self.op.output_fields):
      # at least one dynamic field requested: ask the nodes for live data
      live_data = {}
      node_data = rpc.call_all_instances_info(nodes)
      for name in nodes:
        result = node_data[name]
        if result:
          live_data.update(result)
        elif result == False:
          # the RPC itself failed (as opposed to "no instances running")
          bad_nodes.append(name)
        # else no instance is alive
    else:
      live_data = {}
      for name in instance_names:
        live_data[name] = {}

    # end data gathering

    # NOTE(review): "auto_balance" is accepted by _CheckOutputFields above
    # but has no corresponding branch below — selecting it would raise
    # ParameterError; confirm whether a branch is missing
    output = []
    for instance in instance_list:
      iout = []
      for field in self.op.output_fields:
        if field == "name":
          val = instance.name
        elif field == "os":
          val = instance.os
        elif field == "pnode":
          val = instance.primary_node
        elif field == "snodes":
          val = list(instance.secondary_nodes)
        elif field == "admin_state":
          val = (instance.status != "down")
        elif field == "oper_state":
          if instance.primary_node in bad_nodes:
            val = None
          else:
            val = bool(live_data.get(instance.name))
        elif field == "status":
          if instance.primary_node in bad_nodes:
            val = "ERROR_nodedown"
          else:
            running = bool(live_data.get(instance.name))
            admin_up = (instance.status != "down")
            if running and admin_up:
              val = "running"
            elif running:
              val = "ERROR_up"
            elif admin_up:
              val = "ERROR_down"
            else:
              val = "ADMIN_down"
        elif field == "admin_ram":
          val = instance.memory
        elif field == "oper_ram":
          if instance.primary_node in bad_nodes:
            val = None
          elif instance.name in live_data:
            val = live_data[instance.name].get("memory", "?")
          else:
            val = "-"
        elif field == "disk_template":
          val = instance.disk_template
        elif field == "ip":
          val = instance.nics[0].ip
        elif field == "bridge":
          val = instance.nics[0].bridge
        elif field == "mac":
          val = instance.nics[0].mac
        elif field in ("sda_size", "sdb_size"):
          disk = instance.FindDisk(field[:3])
          if disk is None:
            val = None
          else:
            val = disk.size
        elif field == "vcpus":
          val = instance.vcpus
        elif field == "tags":
          val = list(instance.GetTags())
        elif field in ("network_port", "kernel_path", "initrd_path",
                       "hvm_boot_order", "hvm_acpi", "hvm_pae",
                       "hvm_cdrom_image_path", "hvm_nic_type",
                       "hvm_disk_type", "vnc_bind_address"):
          val = getattr(instance, field, None)
          if val is None:
            # unset hypervisor parameters render as "default" or "-"
            if field in ("hvm_nic_type", "hvm_disk_type",
                         "kernel_path", "initrd_path"):
              val = "default"
            else:
              val = "-"
        else:
          raise errors.ParameterError(field)
        iout.append(val)
      output.append(iout)

    return output
2619 a8083063 Iustin Pop
2620 a8083063 Iustin Pop
2621 a8083063 Iustin Pop
class LUFailoverInstance(LogicalUnit):
  """Failover an instance.

  Moves a network-mirrored instance to its secondary node: shut it down
  on the current primary, flip the configuration, and (only if the
  instance was marked up) restart it on the former secondary.

  """
  HPATH = "instance-failover"
  HTYPE = constants.HTYPE_INSTANCE
  _OP_REQP = ["instance_name", "ignore_consistency"]
  REQ_BGL = False

  def ExpandNames(self):
    # lock the instance now; the node locks are filled in later by
    # DeclareLocks, once the instance's nodes are known
    self._ExpandAndLockInstance()
    self.needed_locks[locking.LEVEL_NODE] = []
    self.recalculate_locks[locking.LEVEL_NODE] = 'replace'

  def DeclareLocks(self, level):
    if level == locking.LEVEL_NODE:
      self._LockInstancesNodes()

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and secondary nodes of the instance.

    """
    env = {
      "IGNORE_CONSISTENCY": self.op.ignore_consistency,
      }
    env.update(_BuildInstanceHookEnvByObject(self.instance))
    # NOTE(review): the node list is master + secondaries only; the
    # primary node is not included here despite the docstring — confirm
    nl = [self.sstore.GetMasterNode()] + list(self.instance.secondary_nodes)
    return env, nl, nl

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster.

    """
    self.instance = instance = self.cfg.GetInstanceInfo(self.op.instance_name)
    assert self.instance is not None, \
      "Cannot retrieve locked instance %s" % self.op.instance_name

    # failover only makes sense for disk templates with a network mirror
    if instance.disk_template not in constants.DTS_NET_MIRROR:
      raise errors.OpPrereqError("Instance's disk layout is not"
                                 " network mirrored, cannot failover.")

    secondary_nodes = instance.secondary_nodes
    if not secondary_nodes:
      raise errors.ProgrammerError("no secondary node but using "
                                   "a mirrored disk template")

    target_node = secondary_nodes[0]
    # check memory requirements on the secondary node
    _CheckNodeFreeMemory(self.cfg, target_node, "failing over instance %s" %
                         instance.name, instance.memory)

    # check bridge existance
    brlist = [nic.bridge for nic in instance.nics]
    if not rpc.call_bridges_exist(target_node, brlist):
      raise errors.OpPrereqError("One or more target bridges %s does not"
                                 " exist on destination node '%s'" %
                                 (brlist, target_node))

  def Exec(self, feedback_fn):
    """Failover an instance.

    The failover is done by shutting it down on its present node and
    starting it on the secondary.

    """
    instance = self.instance

    source_node = instance.primary_node
    target_node = instance.secondary_nodes[0]

    feedback_fn("* checking disk consistency between source and target")
    for dev in instance.disks:
      # for drbd, these are drbd over lvm
      if not _CheckDiskConsistency(self.cfg, dev, target_node, False):
        # a degraded disk only aborts the failover for a running
        # instance, and only when the user did not opt to ignore it
        if instance.status == "up" and not self.op.ignore_consistency:
          raise errors.OpExecError("Disk %s is degraded on target node,"
                                   " aborting failover." % dev.iv_name)

    feedback_fn("* shutting down instance on source node")
    logger.Info("Shutting down instance %s on node %s" %
                (instance.name, source_node))

    if not rpc.call_instance_shutdown(source_node, instance):
      # with ignore_consistency we proceed even if the source node could
      # not shut the instance down (e.g. the node is unreachable)
      if self.op.ignore_consistency:
        logger.Error("Could not shutdown instance %s on node %s. Proceeding"
                     " anyway. Please make sure node %s is down"  %
                     (instance.name, source_node, source_node))
      else:
        raise errors.OpExecError("Could not shutdown instance %s on node %s" %
                                 (instance.name, source_node))

    feedback_fn("* deactivating the instance's disks on source node")
    if not _ShutdownInstanceDisks(instance, self.cfg, ignore_primary=True):
      raise errors.OpExecError("Can't shut down the instance's disks.")

    instance.primary_node = target_node
    # distribute new instance config to the other nodes
    self.cfg.Update(instance)

    # Only start the instance if it's marked as up
    if instance.status == "up":
      feedback_fn("* activating the instance's disks on target node")
      logger.Info("Starting instance %s on node %s" %
                  (instance.name, target_node))

      disks_ok, dummy = _AssembleInstanceDisks(instance, self.cfg,
                                               ignore_secondaries=True)
      if not disks_ok:
        # roll back: do not leave the disks half-activated
        _ShutdownInstanceDisks(instance, self.cfg)
        raise errors.OpExecError("Can't activate the instance's disks")

      feedback_fn("* starting the instance on the target node")
      if not rpc.call_instance_start(target_node, instance, None):
        _ShutdownInstanceDisks(instance, self.cfg)
        raise errors.OpExecError("Could not start instance %s on node %s." %
                                 (instance.name, target_node))
2741 a8083063 Iustin Pop
2742 a8083063 Iustin Pop
2743 3f78eef2 Iustin Pop
def _CreateBlockDevOnPrimary(cfg, node, instance, device, info):
  """Create a tree of block devices on the primary node.

  This always creates all devices.

  Args:
    cfg: the cluster configuration object
    node: the (primary) node to create the devices on
    instance: the instance that will own the devices
    device: the top-level objects.Disk; its children are created first
    info: metadata text to attach to the created device

  Returns:
    True on success, False as soon as any device fails to create

  """
  # depth-first: children must exist before their parent device
  if device.children:
    for child in device.children:
      if not _CreateBlockDevOnPrimary(cfg, node, instance, child, info):
        return False

  cfg.SetDiskID(device, node)
  new_id = rpc.call_blockdev_create(node, device, device.size,
                                    instance.name, True, info)
  if not new_id:
    return False
  # remember the backend-assigned id, but never overwrite an existing one
  if device.physical_id is None:
    device.physical_id = new_id
  return True
2762 a8083063 Iustin Pop
2763 a8083063 Iustin Pop
2764 3f78eef2 Iustin Pop
def _CreateBlockDevOnSecondary(cfg, node, instance, device, force, info):
  """Create a tree of block devices on a secondary node.

  If this device type has to be created on secondaries, create it and
  all its children.

  If not, just recurse to children keeping the same 'force' value.

  Args:
    cfg: the cluster configuration object
    node: the secondary node to create the devices on
    instance: the instance that will own the devices
    device: the top-level objects.Disk
    force: create this device even if its type does not require it
    info: metadata text to attach to the created device

  Returns:
    True on success, False as soon as any device fails to create

  """
  # once a device has to exist on secondaries, all its descendants do too
  if device.CreateOnSecondary():
    force = True
  if device.children:
    for child in device.children:
      if not _CreateBlockDevOnSecondary(cfg, node, instance,
                                        child, force, info):
        return False

  if not force:
    return True
  cfg.SetDiskID(device, node)
  new_id = rpc.call_blockdev_create(node, device, device.size,
                                    instance.name, False, info)
  if not new_id:
    return False
  # keep the backend-assigned id unless one is already set
  if device.physical_id is None:
    device.physical_id = new_id
  return True
2792 a8083063 Iustin Pop
2793 923b1523 Iustin Pop
def _GenerateUniqueNames(cfg, exts):
2794 923b1523 Iustin Pop
  """Generate a suitable LV name.
2795 923b1523 Iustin Pop

2796 923b1523 Iustin Pop
  This will generate a logical volume name for the given instance.
2797 923b1523 Iustin Pop

2798 923b1523 Iustin Pop
  """
2799 923b1523 Iustin Pop
  results = []
2800 923b1523 Iustin Pop
  for val in exts:
2801 923b1523 Iustin Pop
    new_id = cfg.GenerateUniqueID()
2802 923b1523 Iustin Pop
    results.append("%s%s" % (new_id, val))
2803 923b1523 Iustin Pop
  return results
2804 923b1523 Iustin Pop
2805 923b1523 Iustin Pop
2806 a1f445d3 Iustin Pop
def _GenerateDRBD8Branch(cfg, primary, secondary, size, names, iv_name):
  """Build a drbd8 disk object complete with its two LV children.

  The first entry of 'names' becomes the data LV, the second the
  metadata LV (fixed at 128 MB).

  """
  # allocate the DRBD port before reading the VG name, as in the
  # original call order (AllocatePort mutates cluster state)
  port = cfg.AllocatePort()
  vgname = cfg.GetVGName()
  lv_data = objects.Disk(dev_type=constants.LD_LV, size=size,
                         logical_id=(vgname, names[0]))
  lv_meta = objects.Disk(dev_type=constants.LD_LV, size=128,
                         logical_id=(vgname, names[1]))
  return objects.Disk(dev_type=constants.LD_DRBD8, size=size,
                      logical_id=(primary, secondary, port),
                      children=[lv_data, lv_meta],
                      iv_name=iv_name)
2821 a1f445d3 Iustin Pop
2822 7c0d6283 Michael Hanselmann
2823 923b1523 Iustin Pop
def _GenerateDiskTemplate(cfg, template_name,
                          instance_name, primary_node,
                          secondary_nodes, disk_sz, swap_sz,
                          file_storage_dir, file_driver):
  """Generate the entire disk layout for a given template type.

  Builds the objects.Disk tree (an 'sda' data disk and an 'sdb' swap
  disk) matching 'template_name'; no block devices are created on any
  node here.

  Raises:
    errors.ProgrammerError: if the number of secondary nodes does not
      fit the template, or the template name is unknown

  """
  #TODO: compute space requirements

  vgname = cfg.GetVGName()
  if template_name == constants.DT_DISKLESS:
    disks = []
  elif template_name == constants.DT_PLAIN:
    # plain LVM volumes live only on the primary node
    if len(secondary_nodes) != 0:
      raise errors.ProgrammerError("Wrong template configuration")

    names = _GenerateUniqueNames(cfg, [".sda", ".sdb"])
    sda_dev = objects.Disk(dev_type=constants.LD_LV, size=disk_sz,
                           logical_id=(vgname, names[0]),
                           iv_name = "sda")
    sdb_dev = objects.Disk(dev_type=constants.LD_LV, size=swap_sz,
                           logical_id=(vgname, names[1]),
                           iv_name = "sdb")
    disks = [sda_dev, sdb_dev]
  elif template_name == constants.DT_DRBD8:
    # drbd8 mirrors to exactly one secondary node
    if len(secondary_nodes) != 1:
      raise errors.ProgrammerError("Wrong template configuration")
    remote_node = secondary_nodes[0]
    # each drbd device needs a data LV and a metadata LV
    names = _GenerateUniqueNames(cfg, [".sda_data", ".sda_meta",
                                       ".sdb_data", ".sdb_meta"])
    drbd_sda_dev = _GenerateDRBD8Branch(cfg, primary_node, remote_node,
                                         disk_sz, names[0:2], "sda")
    drbd_sdb_dev = _GenerateDRBD8Branch(cfg, primary_node, remote_node,
                                         swap_sz, names[2:4], "sdb")
    disks = [drbd_sda_dev, drbd_sdb_dev]
  elif template_name == constants.DT_FILE:
    if len(secondary_nodes) != 0:
      raise errors.ProgrammerError("Wrong template configuration")

    file_sda_dev = objects.Disk(dev_type=constants.LD_FILE, size=disk_sz,
                                iv_name="sda", logical_id=(file_driver,
                                "%s/sda" % file_storage_dir))
    file_sdb_dev = objects.Disk(dev_type=constants.LD_FILE, size=swap_sz,
                                iv_name="sdb", logical_id=(file_driver,
                                "%s/sdb" % file_storage_dir))
    disks = [file_sda_dev, file_sdb_dev]
  else:
    raise errors.ProgrammerError("Invalid disk template '%s'" % template_name)
  return disks
2872 a8083063 Iustin Pop
2873 a8083063 Iustin Pop
2874 a0c3fea1 Michael Hanselmann
def _GetInstanceInfoText(instance):
2875 3ecf6786 Iustin Pop
  """Compute that text that should be added to the disk's metadata.
2876 3ecf6786 Iustin Pop

2877 3ecf6786 Iustin Pop
  """
2878 a0c3fea1 Michael Hanselmann
  return "originstname+%s" % instance.name
2879 a0c3fea1 Michael Hanselmann
2880 a0c3fea1 Michael Hanselmann
2881 a8083063 Iustin Pop
def _CreateDisks(cfg, instance):
  """Create all disks for an instance.

  This abstracts away some work from AddInstance.

  Args:
    instance: the instance object

  Returns:
    True or False showing the success of the creation process

  """
  info = _GetInstanceInfoText(instance)

  # file-based disks need their containing directory created first
  if instance.disk_template == constants.DT_FILE:
    file_storage_dir = os.path.dirname(instance.disks[0].logical_id[1])
    result = rpc.call_file_storage_dir_create(instance.primary_node,
                                              file_storage_dir)

    # a false result means the RPC itself failed (node unreachable)
    if not result:
      logger.Error("Could not connect to node '%s'" % instance.primary_node)
      return False

    # result[0] is the success flag of the directory creation
    if not result[0]:
      logger.Error("failed to create directory '%s'" % file_storage_dir)
      return False

  for device in instance.disks:
    logger.Info("creating volume %s for instance %s" %
                (device.iv_name, instance.name))
    #HARDCODE
    # secondaries are handled before the primary — presumably required
    # for mirrored device types; TODO confirm the ordering constraint
    for secondary_node in instance.secondary_nodes:
      if not _CreateBlockDevOnSecondary(cfg, secondary_node, instance,
                                        device, False, info):
        logger.Error("failed to create volume %s (%s) on secondary node %s!" %
                     (device.iv_name, device, secondary_node))
        return False
    #HARDCODE
    if not _CreateBlockDevOnPrimary(cfg, instance.primary_node,
                                    instance, device, info):
      logger.Error("failed to create volume %s on primary!" %
                   device.iv_name)
      return False

  return True
2926 a8083063 Iustin Pop
2927 a8083063 Iustin Pop
2928 a8083063 Iustin Pop
def _RemoveDisks(instance, cfg):
  """Remove all disks for an instance.

  This abstracts away some work from `AddInstance()` and
  `RemoveInstance()`. Note that in case some of the devices couldn't
  be removed, the removal will continue with the other ones (compare
  with `_CreateDisks()`).

  Args:
    instance: the instance object

  Returns:
    True or False showing the success of the removal process

  """
  logger.Info("removing block devices for instance %s" % instance.name)

  result = True
  for device in instance.disks:
    for node, disk in device.ComputeNodeTree(instance.primary_node):
      cfg.SetDiskID(disk, node)
      if not rpc.call_blockdev_remove(node, disk):
        # best-effort: log and keep going so the remaining devices are
        # still removed; remember the failure in the return value
        logger.Error("could not remove block device %s on node %s,"
                     " continuing anyway" %
                     (device.iv_name, node))
        result = False

  # for file-based disks, also remove the containing directory
  if instance.disk_template == constants.DT_FILE:
    file_storage_dir = os.path.dirname(instance.disks[0].logical_id[1])
    if not rpc.call_file_storage_dir_remove(instance.primary_node,
                                            file_storage_dir):
      logger.Error("could not remove directory '%s'" % file_storage_dir)
      result = False

  return result
2963 a8083063 Iustin Pop
2964 a8083063 Iustin Pop
2965 e2fe6369 Iustin Pop
def _ComputeDiskSize(disk_template, disk_size, swap_size):
  """Compute disk size requirements in the volume group

  This is currently hard-coded for the two-drive layout; templates that
  do not use the volume group map to None.

  """
  # Required free disk space as a function of disk and swap space;
  # 256 MB are added for drbd metadata, 128MB for each drbd device
  requirements = {
    constants.DT_DISKLESS: None,
    constants.DT_PLAIN: disk_size + swap_size,
    constants.DT_DRBD8: disk_size + swap_size + 256,
    constants.DT_FILE: None,
    }

  try:
    return requirements[disk_template]
  except KeyError:
    raise errors.ProgrammerError("Disk template '%s' size requirement"
                                 " is unknown" % disk_template)
2985 e2fe6369 Iustin Pop
2986 e2fe6369 Iustin Pop
2987 a8083063 Iustin Pop
class LUCreateInstance(LogicalUnit):
2988 a8083063 Iustin Pop
  """Create an instance.
2989 a8083063 Iustin Pop

2990 a8083063 Iustin Pop
  """
2991 a8083063 Iustin Pop
  HPATH = "instance-add"
2992 a8083063 Iustin Pop
  HTYPE = constants.HTYPE_INSTANCE
2993 538475ca Iustin Pop
  _OP_REQP = ["instance_name", "mem_size", "disk_size",
2994 a8083063 Iustin Pop
              "disk_template", "swap_size", "mode", "start", "vcpus",
2995 1862d460 Alexander Schreiber
              "wait_for_sync", "ip_check", "mac"]
2996 a8083063 Iustin Pop
2997 538475ca Iustin Pop
  def _RunAllocator(self):
    """Run the allocator based on input opcode.

    Queries the configured instance allocator for node placement and
    fills in self.op.pnode (and self.op.snode when two nodes are
    required, e.g. for drbd templates).

    Raises:
      errors.OpPrereqError: if the allocator fails or returns a number
        of nodes different from what it itself declared as required

    """
    disks = [{"size": self.op.disk_size, "mode": "w"},
             {"size": self.op.swap_size, "mode": "w"}]
    nics = [{"mac": self.op.mac, "ip": getattr(self.op, "ip", None),
             "bridge": self.op.bridge}]
    ial = IAllocator(self.cfg, self.sstore,
                     mode=constants.IALLOCATOR_MODE_ALLOC,
                     name=self.op.instance_name,
                     disk_template=self.op.disk_template,
                     tags=[],
                     os=self.op.os_type,
                     vcpus=self.op.vcpus,
                     mem_size=self.op.mem_size,
                     disks=disks,
                     nics=nics,
                     )

    ial.Run(self.op.iallocator)

    if not ial.success:
      raise errors.OpPrereqError("Can't compute nodes using"
                                 " iallocator '%s': %s" % (self.op.iallocator,
                                                           ial.info))
    if len(ial.nodes) != ial.required_nodes:
      # BUGFIX: the format string has three placeholders, but only two
      # arguments were passed before, so this raise died with a
      # TypeError instead of the intended OpPrereqError; the allocator
      # name is now supplied as the first argument
      raise errors.OpPrereqError("iallocator '%s' returned invalid number"
                                 " of nodes (%s), required %s" %
                                 (self.op.iallocator, len(ial.nodes),
                                  ial.required_nodes))
    self.op.pnode = ial.nodes[0]
    logger.ToStdout("Selected nodes for the instance: %s" %
                    (", ".join(ial.nodes),))
    logger.Info("Selected nodes for instance %s via iallocator %s: %s" %
                (self.op.instance_name, self.op.iallocator, ial.nodes))
    if ial.required_nodes == 2:
      self.op.snode = ial.nodes[1]
3034 538475ca Iustin Pop
3035 a8083063 Iustin Pop
  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and secondary nodes of the instance.

    """
    env = {
      "INSTANCE_DISK_TEMPLATE": self.op.disk_template,
      "INSTANCE_DISK_SIZE": self.op.disk_size,
      "INSTANCE_SWAP_SIZE": self.op.swap_size,
      "INSTANCE_ADD_MODE": self.op.mode,
      }
    if self.op.mode == constants.INSTANCE_IMPORT:
      # import-specific variables; self.src_image is set during
      # CheckPrereq (hooks are built after the prereq checks)
      env["INSTANCE_SRC_NODE"] = self.op.src_node
      env["INSTANCE_SRC_PATH"] = self.op.src_path
      env["INSTANCE_SRC_IMAGE"] = self.src_image

    env.update(_BuildInstanceHookEnv(name=self.op.instance_name,
      primary_node=self.op.pnode,
      secondary_nodes=self.secondaries,
      status=self.instance_status,
      os_type=self.op.os_type,
      memory=self.op.mem_size,
      vcpus=self.op.vcpus,
      nics=[(self.inst_ip, self.op.bridge, self.op.mac)],
    ))

    # hooks run on the master node, the chosen primary and all
    # secondaries of the new instance
    nl = ([self.sstore.GetMasterNode(), self.op.pnode] +
          self.secondaries)
    return env, nl, nl
3065 a8083063 Iustin Pop
3066 a8083063 Iustin Pop
3067 a8083063 Iustin Pop
  def CheckPrereq(self):
3068 a8083063 Iustin Pop
    """Check prerequisites.
3069 a8083063 Iustin Pop

3070 a8083063 Iustin Pop
    """
3071 538475ca Iustin Pop
    # set optional parameters to none if they don't exist
3072 538475ca Iustin Pop
    for attr in ["kernel_path", "initrd_path", "hvm_boot_order", "pnode",
3073 31a853d2 Iustin Pop
                 "iallocator", "hvm_acpi", "hvm_pae", "hvm_cdrom_image_path",
3074 5397e0b7 Alexander Schreiber
                 "hvm_nic_type", "hvm_disk_type", "vnc_bind_address"]:
3075 40ed12dd Guido Trotter
      if not hasattr(self.op, attr):
3076 40ed12dd Guido Trotter
        setattr(self.op, attr, None)
3077 40ed12dd Guido Trotter
3078 a8083063 Iustin Pop
    if self.op.mode not in (constants.INSTANCE_CREATE,
3079 a8083063 Iustin Pop
                            constants.INSTANCE_IMPORT):
3080 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("Invalid instance creation mode '%s'" %
3081 3ecf6786 Iustin Pop
                                 self.op.mode)
3082 a8083063 Iustin Pop
3083 eedc99de Manuel Franceschini
    if (not self.cfg.GetVGName() and
3084 eedc99de Manuel Franceschini
        self.op.disk_template not in constants.DTS_NOT_LVM):
3085 eedc99de Manuel Franceschini
      raise errors.OpPrereqError("Cluster does not support lvm-based"
3086 eedc99de Manuel Franceschini
                                 " instances")
3087 eedc99de Manuel Franceschini
3088 a8083063 Iustin Pop
    if self.op.mode == constants.INSTANCE_IMPORT:
3089 a8083063 Iustin Pop
      src_node = getattr(self.op, "src_node", None)
3090 a8083063 Iustin Pop
      src_path = getattr(self.op, "src_path", None)
3091 a8083063 Iustin Pop
      if src_node is None or src_path is None:
3092 3ecf6786 Iustin Pop
        raise errors.OpPrereqError("Importing an instance requires source"
3093 3ecf6786 Iustin Pop
                                   " node and path options")
3094 a8083063 Iustin Pop
      src_node_full = self.cfg.ExpandNodeName(src_node)
3095 a8083063 Iustin Pop
      if src_node_full is None:
3096 3ecf6786 Iustin Pop
        raise errors.OpPrereqError("Unknown source node '%s'" % src_node)
3097 a8083063 Iustin Pop
      self.op.src_node = src_node = src_node_full
3098 a8083063 Iustin Pop
3099 a8083063 Iustin Pop
      if not os.path.isabs(src_path):
3100 3ecf6786 Iustin Pop
        raise errors.OpPrereqError("The source path must be absolute")
3101 a8083063 Iustin Pop
3102 a8083063 Iustin Pop
      export_info = rpc.call_export_info(src_node, src_path)
3103 a8083063 Iustin Pop
3104 a8083063 Iustin Pop
      if not export_info:
3105 3ecf6786 Iustin Pop
        raise errors.OpPrereqError("No export found in dir %s" % src_path)
3106 a8083063 Iustin Pop
3107 a8083063 Iustin Pop
      if not export_info.has_section(constants.INISECT_EXP):
3108 3ecf6786 Iustin Pop
        raise errors.ProgrammerError("Corrupted export config")
3109 a8083063 Iustin Pop
3110 a8083063 Iustin Pop
      ei_version = export_info.get(constants.INISECT_EXP, 'version')
3111 a8083063 Iustin Pop
      if (int(ei_version) != constants.EXPORT_VERSION):
3112 3ecf6786 Iustin Pop
        raise errors.OpPrereqError("Wrong export version %s (wanted %d)" %
3113 3ecf6786 Iustin Pop
                                   (ei_version, constants.EXPORT_VERSION))
3114 a8083063 Iustin Pop
3115 a8083063 Iustin Pop
      if int(export_info.get(constants.INISECT_INS, 'disk_count')) > 1:
3116 3ecf6786 Iustin Pop
        raise errors.OpPrereqError("Can't import instance with more than"
3117 3ecf6786 Iustin Pop
                                   " one data disk")
3118 a8083063 Iustin Pop
3119 a8083063 Iustin Pop
      # FIXME: are the old os-es, disk sizes, etc. useful?
3120 a8083063 Iustin Pop
      self.op.os_type = export_info.get(constants.INISECT_EXP, 'os')
3121 a8083063 Iustin Pop
      diskimage = os.path.join(src_path, export_info.get(constants.INISECT_INS,
3122 a8083063 Iustin Pop
                                                         'disk0_dump'))
3123 a8083063 Iustin Pop
      self.src_image = diskimage
3124 a8083063 Iustin Pop
    else: # INSTANCE_CREATE
3125 a8083063 Iustin Pop
      if getattr(self.op, "os_type", None) is None:
3126 3ecf6786 Iustin Pop
        raise errors.OpPrereqError("No guest OS specified")
3127 a8083063 Iustin Pop
3128 901a65c1 Iustin Pop
    #### instance parameters check
3129 901a65c1 Iustin Pop
3130 a8083063 Iustin Pop
    # disk template and mirror node verification
3131 a8083063 Iustin Pop
    if self.op.disk_template not in constants.DISK_TEMPLATES:
3132 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("Invalid disk template name")
3133 a8083063 Iustin Pop
3134 901a65c1 Iustin Pop
    # instance name verification
3135 901a65c1 Iustin Pop
    hostname1 = utils.HostInfo(self.op.instance_name)
3136 901a65c1 Iustin Pop
3137 901a65c1 Iustin Pop
    self.op.instance_name = instance_name = hostname1.name
3138 901a65c1 Iustin Pop
    instance_list = self.cfg.GetInstanceList()
3139 901a65c1 Iustin Pop
    if instance_name in instance_list:
3140 901a65c1 Iustin Pop
      raise errors.OpPrereqError("Instance '%s' is already in the cluster" %
3141 901a65c1 Iustin Pop
                                 instance_name)
3142 901a65c1 Iustin Pop
3143 901a65c1 Iustin Pop
    # ip validity checks
3144 901a65c1 Iustin Pop
    ip = getattr(self.op, "ip", None)
3145 901a65c1 Iustin Pop
    if ip is None or ip.lower() == "none":
3146 901a65c1 Iustin Pop
      inst_ip = None
3147 901a65c1 Iustin Pop
    elif ip.lower() == "auto":
3148 901a65c1 Iustin Pop
      inst_ip = hostname1.ip
3149 901a65c1 Iustin Pop
    else:
3150 901a65c1 Iustin Pop
      if not utils.IsValidIP(ip):
3151 901a65c1 Iustin Pop
        raise errors.OpPrereqError("given IP address '%s' doesn't look"
3152 901a65c1 Iustin Pop
                                   " like a valid IP" % ip)
3153 901a65c1 Iustin Pop
      inst_ip = ip
3154 901a65c1 Iustin Pop
    self.inst_ip = self.op.ip = inst_ip
3155 901a65c1 Iustin Pop
3156 901a65c1 Iustin Pop
    if self.op.start and not self.op.ip_check:
3157 901a65c1 Iustin Pop
      raise errors.OpPrereqError("Cannot ignore IP address conflicts when"
3158 901a65c1 Iustin Pop
                                 " adding an instance in start mode")
3159 901a65c1 Iustin Pop
3160 901a65c1 Iustin Pop
    if self.op.ip_check:
3161 901a65c1 Iustin Pop
      if utils.TcpPing(hostname1.ip, constants.DEFAULT_NODED_PORT):
3162 901a65c1 Iustin Pop
        raise errors.OpPrereqError("IP %s of instance %s already in use" %
3163 901a65c1 Iustin Pop
                                   (hostname1.ip, instance_name))
3164 901a65c1 Iustin Pop
3165 901a65c1 Iustin Pop
    # MAC address verification
3166 901a65c1 Iustin Pop
    if self.op.mac != "auto":
3167 901a65c1 Iustin Pop
      if not utils.IsValidMac(self.op.mac.lower()):
3168 901a65c1 Iustin Pop
        raise errors.OpPrereqError("invalid MAC address specified: %s" %
3169 901a65c1 Iustin Pop
                                   self.op.mac)
3170 901a65c1 Iustin Pop
3171 901a65c1 Iustin Pop
    # bridge verification
3172 901a65c1 Iustin Pop
    bridge = getattr(self.op, "bridge", None)
3173 901a65c1 Iustin Pop
    if bridge is None:
3174 901a65c1 Iustin Pop
      self.op.bridge = self.cfg.GetDefBridge()
3175 901a65c1 Iustin Pop
    else:
3176 901a65c1 Iustin Pop
      self.op.bridge = bridge
3177 901a65c1 Iustin Pop
3178 901a65c1 Iustin Pop
    # boot order verification
3179 901a65c1 Iustin Pop
    if self.op.hvm_boot_order is not None:
3180 901a65c1 Iustin Pop
      if len(self.op.hvm_boot_order.strip("acdn")) != 0:
3181 901a65c1 Iustin Pop
        raise errors.OpPrereqError("invalid boot order specified,"
3182 901a65c1 Iustin Pop
                                   " must be one or more of [acdn]")
3183 901a65c1 Iustin Pop
    # file storage checks
3184 0f1a06e3 Manuel Franceschini
    if (self.op.file_driver and
3185 0f1a06e3 Manuel Franceschini
        not self.op.file_driver in constants.FILE_DRIVER):
3186 0f1a06e3 Manuel Franceschini
      raise errors.OpPrereqError("Invalid file driver name '%s'" %
3187 0f1a06e3 Manuel Franceschini
                                 self.op.file_driver)
3188 0f1a06e3 Manuel Franceschini
3189 0f1a06e3 Manuel Franceschini
    if self.op.file_storage_dir and os.path.isabs(self.op.file_storage_dir):
3190 b4de68a9 Iustin Pop
      raise errors.OpPrereqError("File storage directory not a relative"
3191 b4de68a9 Iustin Pop
                                 " path")
3192 538475ca Iustin Pop
    #### allocator run
3193 538475ca Iustin Pop
3194 538475ca Iustin Pop
    if [self.op.iallocator, self.op.pnode].count(None) != 1:
3195 538475ca Iustin Pop
      raise errors.OpPrereqError("One and only one of iallocator and primary"
3196 538475ca Iustin Pop
                                 " node must be given")
3197 538475ca Iustin Pop
3198 538475ca Iustin Pop
    if self.op.iallocator is not None:
3199 538475ca Iustin Pop
      self._RunAllocator()
3200 0f1a06e3 Manuel Franceschini
3201 901a65c1 Iustin Pop
    #### node related checks
3202 901a65c1 Iustin Pop
3203 901a65c1 Iustin Pop
    # check primary node
3204 901a65c1 Iustin Pop
    pnode = self.cfg.GetNodeInfo(self.cfg.ExpandNodeName(self.op.pnode))
3205 901a65c1 Iustin Pop
    if pnode is None:
3206 901a65c1 Iustin Pop
      raise errors.OpPrereqError("Primary node '%s' is unknown" %
3207 901a65c1 Iustin Pop
                                 self.op.pnode)
3208 901a65c1 Iustin Pop
    self.op.pnode = pnode.name
3209 901a65c1 Iustin Pop
    self.pnode = pnode
3210 901a65c1 Iustin Pop
    self.secondaries = []
3211 901a65c1 Iustin Pop
3212 901a65c1 Iustin Pop
    # mirror node verification
3213 a1f445d3 Iustin Pop
    if self.op.disk_template in constants.DTS_NET_MIRROR:
3214 a8083063 Iustin Pop
      if getattr(self.op, "snode", None) is None:
3215 a1f445d3 Iustin Pop
        raise errors.OpPrereqError("The networked disk templates need"
3216 3ecf6786 Iustin Pop
                                   " a mirror node")
3217 a8083063 Iustin Pop
3218 a8083063 Iustin Pop
      snode_name = self.cfg.ExpandNodeName(self.op.snode)
3219 a8083063 Iustin Pop
      if snode_name is None:
3220 3ecf6786 Iustin Pop
        raise errors.OpPrereqError("Unknown secondary node '%s'" %
3221 3ecf6786 Iustin Pop
                                   self.op.snode)
3222 a8083063 Iustin Pop
      elif snode_name == pnode.name:
3223 3ecf6786 Iustin Pop
        raise errors.OpPrereqError("The secondary node cannot be"
3224 3ecf6786 Iustin Pop
                                   " the primary node.")
3225 a8083063 Iustin Pop
      self.secondaries.append(snode_name)
3226 a8083063 Iustin Pop
3227 e2fe6369 Iustin Pop
    req_size = _ComputeDiskSize(self.op.disk_template,
3228 e2fe6369 Iustin Pop
                                self.op.disk_size, self.op.swap_size)
3229 ed1ebc60 Guido Trotter
3230 8d75db10 Iustin Pop
    # Check lv size requirements
3231 8d75db10 Iustin Pop
    if req_size is not None:
3232 8d75db10 Iustin Pop
      nodenames = [pnode.name] + self.secondaries
3233 8d75db10 Iustin Pop
      nodeinfo = rpc.call_node_info(nodenames, self.cfg.GetVGName())
3234 8d75db10 Iustin Pop
      for node in nodenames:
3235 8d75db10 Iustin Pop
        info = nodeinfo.get(node, None)
3236 8d75db10 Iustin Pop
        if not info:
3237 8d75db10 Iustin Pop
          raise errors.OpPrereqError("Cannot get current information"
3238 3e91897b Iustin Pop
                                     " from node '%s'" % node)
3239 8d75db10 Iustin Pop
        vg_free = info.get('vg_free', None)
3240 8d75db10 Iustin Pop
        if not isinstance(vg_free, int):
3241 8d75db10 Iustin Pop
          raise errors.OpPrereqError("Can't compute free disk space on"
3242 8d75db10 Iustin Pop
                                     " node %s" % node)
3243 8d75db10 Iustin Pop
        if req_size > info['vg_free']:
3244 8d75db10 Iustin Pop
          raise errors.OpPrereqError("Not enough disk space on target node %s."
3245 8d75db10 Iustin Pop
                                     " %d MB available, %d MB required" %
3246 8d75db10 Iustin Pop
                                     (node, info['vg_free'], req_size))
3247 ed1ebc60 Guido Trotter
3248 a8083063 Iustin Pop
    # os verification
3249 00fe9e38 Guido Trotter
    os_obj = rpc.call_os_get(pnode.name, self.op.os_type)
3250 dfa96ded Guido Trotter
    if not os_obj:
3251 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("OS '%s' not in supported os list for"
3252 3ecf6786 Iustin Pop
                                 " primary node"  % self.op.os_type)
3253 a8083063 Iustin Pop
3254 3b6d8c9b Iustin Pop
    if self.op.kernel_path == constants.VALUE_NONE:
3255 3b6d8c9b Iustin Pop
      raise errors.OpPrereqError("Can't set instance kernel to none")
3256 3b6d8c9b Iustin Pop
3257 a8083063 Iustin Pop
3258 901a65c1 Iustin Pop
    # bridge check on primary node
3259 a8083063 Iustin Pop
    if not rpc.call_bridges_exist(self.pnode.name, [self.op.bridge]):
3260 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("target bridge '%s' does not exist on"
3261 3ecf6786 Iustin Pop
                                 " destination node '%s'" %
3262 3ecf6786 Iustin Pop
                                 (self.op.bridge, pnode.name))
3263 a8083063 Iustin Pop
3264 49ce1563 Iustin Pop
    # memory check on primary node
3265 49ce1563 Iustin Pop
    if self.op.start:
3266 49ce1563 Iustin Pop
      _CheckNodeFreeMemory(self.cfg, self.pnode.name,
3267 49ce1563 Iustin Pop
                           "creating instance %s" % self.op.instance_name,
3268 49ce1563 Iustin Pop
                           self.op.mem_size)
3269 49ce1563 Iustin Pop
3270 31a853d2 Iustin Pop
    # hvm_cdrom_image_path verification
3271 31a853d2 Iustin Pop
    if self.op.hvm_cdrom_image_path is not None:
3272 31a853d2 Iustin Pop
      if not os.path.isabs(self.op.hvm_cdrom_image_path):
3273 31a853d2 Iustin Pop
        raise errors.OpPrereqError("The path to the HVM CDROM image must"
3274 31a853d2 Iustin Pop
                                   " be an absolute path or None, not %s" %
3275 31a853d2 Iustin Pop
                                   self.op.hvm_cdrom_image_path)
3276 31a853d2 Iustin Pop
      if not os.path.isfile(self.op.hvm_cdrom_image_path):
3277 31a853d2 Iustin Pop
        raise errors.OpPrereqError("The HVM CDROM image must either be a"
3278 31a853d2 Iustin Pop
                                   " regular file or a symlink pointing to"
3279 31a853d2 Iustin Pop
                                   " an existing regular file, not %s" %
3280 31a853d2 Iustin Pop
                                   self.op.hvm_cdrom_image_path)
3281 31a853d2 Iustin Pop
3282 31a853d2 Iustin Pop
    # vnc_bind_address verification
3283 31a853d2 Iustin Pop
    if self.op.vnc_bind_address is not None:
3284 31a853d2 Iustin Pop
      if not utils.IsValidIP(self.op.vnc_bind_address):
3285 31a853d2 Iustin Pop
        raise errors.OpPrereqError("given VNC bind address '%s' doesn't look"
3286 31a853d2 Iustin Pop
                                   " like a valid IP address" %
3287 31a853d2 Iustin Pop
                                   self.op.vnc_bind_address)
3288 31a853d2 Iustin Pop
3289 5397e0b7 Alexander Schreiber
    # Xen HVM device type checks
3290 5397e0b7 Alexander Schreiber
    if self.sstore.GetHypervisorType() == constants.HT_XEN_HVM31:
3291 5397e0b7 Alexander Schreiber
      if self.op.hvm_nic_type not in constants.HT_HVM_VALID_NIC_TYPES:
3292 5397e0b7 Alexander Schreiber
        raise errors.OpPrereqError("Invalid NIC type %s specified for Xen HVM"
3293 5397e0b7 Alexander Schreiber
                                   " hypervisor" % self.op.hvm_nic_type)
3294 5397e0b7 Alexander Schreiber
      if self.op.hvm_disk_type not in constants.HT_HVM_VALID_DISK_TYPES:
3295 5397e0b7 Alexander Schreiber
        raise errors.OpPrereqError("Invalid disk type %s specified for Xen HVM"
3296 5397e0b7 Alexander Schreiber
                                   " hypervisor" % self.op.hvm_disk_type)
3297 5397e0b7 Alexander Schreiber
3298 a8083063 Iustin Pop
    if self.op.start:
3299 a8083063 Iustin Pop
      self.instance_status = 'up'
3300 a8083063 Iustin Pop
    else:
3301 a8083063 Iustin Pop
      self.instance_status = 'down'
3302 a8083063 Iustin Pop
3303 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
    """Create and add the instance to the cluster.

    The work proceeds in stages: build the NIC (generated or given
    MAC, optional IP), allocate a network port if the hypervisor type
    requires one, compute the file storage path, generate and create
    the disks (rolling back on failure), register the instance in the
    configuration and the lock manager, wait for the disks to sync,
    run the OS creation or import scripts, and finally start the
    instance if requested.

    Args:
      feedback_fn: callable used to send progress messages back to the
        caller

    Raises:
      errors.OpExecError: if disk creation fails, the disks end up
        degraded, OS creation/import fails, or the instance cannot be
        started
      errors.ProgrammerError: if self.op.mode is not one of the known
        creation modes (also validated in CheckPrereq)

    """
    instance = self.op.instance_name
    pnode_name = self.pnode.name

    # NIC setup: either generate a fresh MAC or use the one the user gave
    if self.op.mac == "auto":
      mac_address = self.cfg.GenerateMAC()
    else:
      mac_address = self.op.mac

    nic = objects.NIC(bridge=self.op.bridge, mac=mac_address)
    if self.inst_ip is not None:
      nic.ip = self.inst_ip

    # some hypervisor types need a dedicated network port reserved
    ht_kind = self.sstore.GetHypervisorType()
    if ht_kind in constants.HTS_REQ_PORT:
      network_port = self.cfg.AllocatePort()
    else:
      network_port = None

    if self.op.vnc_bind_address is None:
      self.op.vnc_bind_address = constants.VNC_DEFAULT_BIND_ADDRESS

    # this is needed because os.path.join does not accept None arguments
    if self.op.file_storage_dir is None:
      string_file_storage_dir = ""
    else:
      string_file_storage_dir = self.op.file_storage_dir

    # build the full file storage dir path
    file_storage_dir = os.path.normpath(os.path.join(
                                        self.sstore.GetFileStorageDir(),
                                        string_file_storage_dir, instance))

    disks = _GenerateDiskTemplate(self.cfg,
                                  self.op.disk_template,
                                  instance, pnode_name,
                                  self.secondaries, self.op.disk_size,
                                  self.op.swap_size,
                                  file_storage_dir,
                                  self.op.file_driver)

    iobj = objects.Instance(name=instance, os=self.op.os_type,
                            primary_node=pnode_name,
                            memory=self.op.mem_size,
                            vcpus=self.op.vcpus,
                            nics=[nic], disks=disks,
                            disk_template=self.op.disk_template,
                            status=self.instance_status,
                            network_port=network_port,
                            kernel_path=self.op.kernel_path,
                            initrd_path=self.op.initrd_path,
                            hvm_boot_order=self.op.hvm_boot_order,
                            hvm_acpi=self.op.hvm_acpi,
                            hvm_pae=self.op.hvm_pae,
                            hvm_cdrom_image_path=self.op.hvm_cdrom_image_path,
                            vnc_bind_address=self.op.vnc_bind_address,
                            hvm_nic_type=self.op.hvm_nic_type,
                            hvm_disk_type=self.op.hvm_disk_type,
                            )

    # create the physical disks; if that fails, remove whatever was
    # created before bailing out
    feedback_fn("* creating instance disks...")
    if not _CreateDisks(self.cfg, iobj):
      _RemoveDisks(iobj, self.cfg)
      raise errors.OpExecError("Device creation failed, reverting...")

    feedback_fn("adding instance %s to cluster config" % instance)

    self.cfg.AddInstance(iobj)
    # Add the new instance to the Ganeti Lock Manager
    self.context.glm.add(locking.LEVEL_INSTANCE, instance)

    # wait for the disks to reach a usable state: either a full sync,
    # or (for mirrored templates) at least a non-degraded check
    if self.op.wait_for_sync:
      disk_abort = not _WaitForSync(self.cfg, iobj, self.proc)
    elif iobj.disk_template in constants.DTS_NET_MIRROR:
      # make sure the disks are not degraded (still sync-ing is ok)
      time.sleep(15)
      feedback_fn("* checking mirrors status")
      disk_abort = not _WaitForSync(self.cfg, iobj, self.proc, oneshot=True)
    else:
      disk_abort = False

    if disk_abort:
      # roll back everything done so far: disks, config entry and lock
      _RemoveDisks(iobj, self.cfg)
      self.cfg.RemoveInstance(iobj.name)
      # Remove the new instance from the Ganeti Lock Manager
      self.context.glm.remove(locking.LEVEL_INSTANCE, iobj.name)
      raise errors.OpExecError("There are some degraded disks for"
                               " this instance")

    feedback_fn("creating os for instance %s on node %s" %
                (instance, pnode_name))

    # diskless instances get no OS installation
    if iobj.disk_template != constants.DT_DISKLESS:
      if self.op.mode == constants.INSTANCE_CREATE:
        feedback_fn("* running the instance OS create scripts...")
        if not rpc.call_instance_os_add(pnode_name, iobj, "sda", "sdb"):
          raise errors.OpExecError("could not add os for instance %s"
                                   " on node %s" %
                                   (instance, pnode_name))

      elif self.op.mode == constants.INSTANCE_IMPORT:
        feedback_fn("* running the instance OS import scripts...")
        src_node = self.op.src_node
        src_image = self.src_image
        if not rpc.call_instance_os_import(pnode_name, iobj, "sda", "sdb",
                                                src_node, src_image):
          raise errors.OpExecError("Could not import os for instance"
                                   " %s on node %s" %
                                   (instance, pnode_name))
      else:
        # also checked in the prereq part
        raise errors.ProgrammerError("Unknown OS initialization mode '%s'"
                                     % self.op.mode)

    if self.op.start:
      logger.Info("starting instance %s on node %s" % (instance, pnode_name))
      feedback_fn("* starting instance...")
      if not rpc.call_instance_start(pnode_name, iobj, None):
        raise errors.OpExecError("Could not start instance")
3426 a8083063 Iustin Pop
3427 a8083063 Iustin Pop
3428 a8083063 Iustin Pop
class LUConnectConsole(NoHooksLU):
  """Connect to an instance's console.

  This LU is somewhat special in that it does not act on the cluster
  itself: its result is the command line that must be run on the
  master node in order to attach to the instance's console.

  """
  _OP_REQP = ["instance_name"]
  REQ_BGL = False

  def ExpandNames(self):
    self._ExpandAndLockInstance()

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster.

    """
    self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
    assert self.instance is not None, \
      "Cannot retrieve locked instance %s" % self.op.instance_name

  def Exec(self, feedback_fn):
    """Connect to the console of an instance

    """
    inst = self.instance
    primary = inst.primary_node

    # ask the primary node which instances it is actually running
    running = rpc.call_instance_list([primary])[primary]
    if running is False:
      raise errors.OpExecError("Can't connect to node %s." % primary)
    if inst.name not in running:
      raise errors.OpExecError("Instance %s is not running." % inst.name)

    logger.Debug("connecting to console of %s on %s" % (inst.name, primary))

    console_cmd = hypervisor.GetHypervisor().GetShellCommandForConsole(inst)

    # wrap the hypervisor console command in an ssh invocation to the
    # instance's primary node
    return self.ssh.BuildCmd(primary, "root", console_cmd, batch=True,
                             tty=True)
3473 a8083063 Iustin Pop
3474 a8083063 Iustin Pop
3475 a8083063 Iustin Pop
class LUReplaceDisks(LogicalUnit):
3476 a8083063 Iustin Pop
  """Replace the disks of an instance.
3477 a8083063 Iustin Pop

3478 a8083063 Iustin Pop
  """
3479 a8083063 Iustin Pop
  HPATH = "mirrors-replace"
3480 a8083063 Iustin Pop
  HTYPE = constants.HTYPE_INSTANCE
3481 a9e0c397 Iustin Pop
  _OP_REQP = ["instance_name", "mode", "disks"]
3482 a8083063 Iustin Pop
3483 b6e82a65 Iustin Pop
  def _RunAllocator(self):
    """Compute a new secondary node using an IAllocator.

    Runs the relocation mode of the configured iallocator for this
    instance and validates the answer.

    Returns:
      the name of the node selected as the new secondary; the value is
      also stored in self.op.remote_node.  (Returning it matters:
      CheckPrereq assigns the result of this call to
      self.op.remote_node, so returning None would discard the
      selection.)

    Raises:
      errors.OpPrereqError: if the allocator run fails or returns an
        unexpected number of nodes.

    """
    ial = IAllocator(self.cfg, self.sstore,
                     mode=constants.IALLOCATOR_MODE_RELOC,
                     name=self.op.instance_name,
                     relocate_from=[self.sec_node])

    ial.Run(self.op.iallocator)

    if not ial.success:
      raise errors.OpPrereqError("Can't compute nodes using"
                                 " iallocator '%s': %s" % (self.op.iallocator,
                                                           ial.info))
    if len(ial.nodes) != ial.required_nodes:
      # the format string has three conversions; passing only two
      # arguments (as before) raised TypeError instead of this error
      raise errors.OpPrereqError("iallocator '%s' returned invalid number"
                                 " of nodes (%s), required %s" %
                                 (self.op.iallocator, len(ial.nodes),
                                  ial.required_nodes))
    self.op.remote_node = ial.nodes[0]
    logger.ToStdout("Selected new secondary for the instance: %s" %
                    self.op.remote_node)
    return self.op.remote_node
3505 b6e82a65 Iustin Pop
3506 a8083063 Iustin Pop
  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on the master, the primary and all the secondaries.

    """
    instance = self.instance
    # the generic instance environment is merged in last, so it takes
    # precedence over the replace-specific keys on any collision
    env = dict(MODE=self.op.mode,
               NEW_SECONDARY=self.op.remote_node,
               OLD_SECONDARY=instance.secondary_nodes[0])
    env.update(_BuildInstanceHookEnvByObject(instance))
    node_list = [self.sstore.GetMasterNode(), instance.primary_node]
    if self.op.remote_node is not None:
      node_list.append(self.op.remote_node)
    return env, node_list, node_list
3525 a8083063 Iustin Pop
3526 a8083063 Iustin Pop
  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster and uses a
    network-mirrored disk template, validates the replacement mode
    against the requested new secondary node, and computes the
    tgt_node/oth_node/new_node attributes used by the Exec methods.

    """
    # remote_node is optional in the opcode; normalize it to None
    if not hasattr(self.op, "remote_node"):
      self.op.remote_node = None

    instance = self.cfg.GetInstanceInfo(
      self.cfg.ExpandInstanceName(self.op.instance_name))
    if instance is None:
      raise errors.OpPrereqError("Instance '%s' not known" %
                                 self.op.instance_name)
    self.instance = instance
    self.op.instance_name = instance.name

    # disk replacement only makes sense for network-mirrored templates
    if instance.disk_template not in constants.DTS_NET_MIRROR:
      raise errors.OpPrereqError("Instance's disk layout is not"
                                 " network mirrored.")

    if len(instance.secondary_nodes) != 1:
      raise errors.OpPrereqError("The instance has a strange layout,"
                                 " expected one secondary but found %d" %
                                 len(instance.secondary_nodes))

    self.sec_node = instance.secondary_nodes[0]

    # an iallocator and an explicit new secondary are mutually exclusive
    ia_name = getattr(self.op, "iallocator", None)
    if ia_name is not None:
      if self.op.remote_node is not None:
        raise errors.OpPrereqError("Give either the iallocator or the new"
                                   " secondary, not both")
      # NOTE(review): this assignment depends on _RunAllocator returning
      # the selected node name -- verify it does not return None
      self.op.remote_node = self._RunAllocator()

    remote_node = self.op.remote_node
    if remote_node is not None:
      remote_node = self.cfg.ExpandNodeName(remote_node)
      if remote_node is None:
        raise errors.OpPrereqError("Node '%s' not known" %
                                   self.op.remote_node)
      self.remote_node_info = self.cfg.GetNodeInfo(remote_node)
    else:
      self.remote_node_info = None
    if remote_node == instance.primary_node:
      raise errors.OpPrereqError("The specified node is the primary node of"
                                 " the instance.")
    elif remote_node == self.sec_node:
      if self.op.mode == constants.REPLACE_DISK_SEC:
        # this is for DRBD8, where we can't execute the same mode of
        # replacement as for drbd7 (no different port allocated)
        raise errors.OpPrereqError("Same secondary given, cannot execute"
                                   " replacement")
    if instance.disk_template == constants.DT_DRBD8:
      if (self.op.mode == constants.REPLACE_DISK_ALL and
          remote_node is not None):
        # switch to replace secondary mode
        self.op.mode = constants.REPLACE_DISK_SEC

      if self.op.mode == constants.REPLACE_DISK_ALL:
        raise errors.OpPrereqError("Template 'drbd' only allows primary or"
                                   " secondary disk replacement, not"
                                   " both at once")
      elif self.op.mode == constants.REPLACE_DISK_PRI:
        if remote_node is not None:
          raise errors.OpPrereqError("Template 'drbd' does not allow changing"
                                     " the secondary while doing a primary"
                                     " node disk replacement")
        # primary replacement: work on the primary, the secondary is
        # the "other" node used for sanity checks
        self.tgt_node = instance.primary_node
        self.oth_node = instance.secondary_nodes[0]
      elif self.op.mode == constants.REPLACE_DISK_SEC:
        self.new_node = remote_node # this can be None, in which case
                                    # we don't change the secondary
        self.tgt_node = instance.secondary_nodes[0]
        self.oth_node = instance.primary_node
      else:
        raise errors.ProgrammerError("Unhandled disk replace mode")

    # every requested disk must actually exist on the instance
    for name in self.op.disks:
      if instance.FindDisk(name) is None:
        raise errors.OpPrereqError("Disk '%s' not found for instance '%s'" %
                                   (name, instance.name))
    # store the expanded node name back into the opcode
    self.op.remote_node = remote_node
3609 a8083063 Iustin Pop
3610 a9e0c397 Iustin Pop
  def _ExecD8DiskOnly(self, feedback_fn):
3611 a9e0c397 Iustin Pop
    """Replace a disk on the primary or secondary for dbrd8.
3612 a9e0c397 Iustin Pop

3613 a9e0c397 Iustin Pop
    The algorithm for replace is quite complicated:
3614 a9e0c397 Iustin Pop
      - for each disk to be replaced:
3615 a9e0c397 Iustin Pop
        - create new LVs on the target node with unique names
3616 a9e0c397 Iustin Pop
        - detach old LVs from the drbd device
3617 a9e0c397 Iustin Pop
        - rename old LVs to name_replaced.<time_t>
3618 a9e0c397 Iustin Pop
        - rename new LVs to old LVs
3619 a9e0c397 Iustin Pop
        - attach the new LVs (with the old names now) to the drbd device
3620 a9e0c397 Iustin Pop
      - wait for sync across all devices
3621 a9e0c397 Iustin Pop
      - for each modified disk:
3622 a9e0c397 Iustin Pop
        - remove old LVs (which have the name name_replaces.<time_t>)
3623 a9e0c397 Iustin Pop

3624 a9e0c397 Iustin Pop
    Failures are not very well handled.
3625 cff90b79 Iustin Pop

3626 a9e0c397 Iustin Pop
    """
3627 cff90b79 Iustin Pop
    steps_total = 6
3628 5bfac263 Iustin Pop
    warning, info = (self.proc.LogWarning, self.proc.LogInfo)
3629 a9e0c397 Iustin Pop
    instance = self.instance
3630 a9e0c397 Iustin Pop
    iv_names = {}
3631 a9e0c397 Iustin Pop
    vgname = self.cfg.GetVGName()
3632 a9e0c397 Iustin Pop
    # start of work
3633 a9e0c397 Iustin Pop
    cfg = self.cfg
3634 a9e0c397 Iustin Pop
    tgt_node = self.tgt_node
3635 cff90b79 Iustin Pop
    oth_node = self.oth_node
3636 cff90b79 Iustin Pop
3637 cff90b79 Iustin Pop
    # Step: check device activation
3638 5bfac263 Iustin Pop
    self.proc.LogStep(1, steps_total, "check device existence")
3639 cff90b79 Iustin Pop
    info("checking volume groups")
3640 cff90b79 Iustin Pop
    my_vg = cfg.GetVGName()
3641 cff90b79 Iustin Pop
    results = rpc.call_vg_list([oth_node, tgt_node])
3642 cff90b79 Iustin Pop
    if not results:
3643 cff90b79 Iustin Pop
      raise errors.OpExecError("Can't list volume groups on the nodes")
3644 cff90b79 Iustin Pop
    for node in oth_node, tgt_node:
3645 cff90b79 Iustin Pop
      res = results.get(node, False)
3646 cff90b79 Iustin Pop
      if not res or my_vg not in res:
3647 cff90b79 Iustin Pop
        raise errors.OpExecError("Volume group '%s' not found on %s" %
3648 cff90b79 Iustin Pop
                                 (my_vg, node))
3649 cff90b79 Iustin Pop
    for dev in instance.disks:
3650 cff90b79 Iustin Pop
      if not dev.iv_name in self.op.disks:
3651 cff90b79 Iustin Pop
        continue
3652 cff90b79 Iustin Pop
      for node in tgt_node, oth_node:
3653 cff90b79 Iustin Pop
        info("checking %s on %s" % (dev.iv_name, node))
3654 cff90b79 Iustin Pop
        cfg.SetDiskID(dev, node)
3655 cff90b79 Iustin Pop
        if not rpc.call_blockdev_find(node, dev):
3656 cff90b79 Iustin Pop
          raise errors.OpExecError("Can't find device %s on node %s" %
3657 cff90b79 Iustin Pop
                                   (dev.iv_name, node))
3658 cff90b79 Iustin Pop
3659 cff90b79 Iustin Pop
    # Step: check other node consistency
3660 5bfac263 Iustin Pop
    self.proc.LogStep(2, steps_total, "check peer consistency")
3661 cff90b79 Iustin Pop
    for dev in instance.disks:
3662 cff90b79 Iustin Pop
      if not dev.iv_name in self.op.disks:
3663 cff90b79 Iustin Pop
        continue
3664 cff90b79 Iustin Pop
      info("checking %s consistency on %s" % (dev.iv_name, oth_node))
3665 cff90b79 Iustin Pop
      if not _CheckDiskConsistency(self.cfg, dev, oth_node,
3666 cff90b79 Iustin Pop
                                   oth_node==instance.primary_node):
3667 cff90b79 Iustin Pop
        raise errors.OpExecError("Peer node (%s) has degraded storage, unsafe"
3668 cff90b79 Iustin Pop
                                 " to replace disks on this node (%s)" %
3669 cff90b79 Iustin Pop
                                 (oth_node, tgt_node))
3670 cff90b79 Iustin Pop
3671 cff90b79 Iustin Pop
    # Step: create new storage
3672 5bfac263 Iustin Pop
    self.proc.LogStep(3, steps_total, "allocate new storage")
3673 a9e0c397 Iustin Pop
    for dev in instance.disks:
3674 a9e0c397 Iustin Pop
      if not dev.iv_name in self.op.disks:
3675 a9e0c397 Iustin Pop
        continue
3676 a9e0c397 Iustin Pop
      size = dev.size
3677 a9e0c397 Iustin Pop
      cfg.SetDiskID(dev, tgt_node)
3678 a9e0c397 Iustin Pop
      lv_names = [".%s_%s" % (dev.iv_name, suf) for suf in ["data", "meta"]]
3679 a9e0c397 Iustin Pop
      names = _GenerateUniqueNames(cfg, lv_names)
3680 a9e0c397 Iustin Pop
      lv_data = objects.Disk(dev_type=constants.LD_LV, size=size,
3681 a9e0c397 Iustin Pop
                             logical_id=(vgname, names[0]))
3682 a9e0c397 Iustin Pop
      lv_meta = objects.Disk(dev_type=constants.LD_LV, size=128,
3683 a9e0c397 Iustin Pop
                             logical_id=(vgname, names[1]))
3684 a9e0c397 Iustin Pop
      new_lvs = [lv_data, lv_meta]
3685 a9e0c397 Iustin Pop
      old_lvs = dev.children
3686 a9e0c397 Iustin Pop
      iv_names[dev.iv_name] = (dev, old_lvs, new_lvs)
3687 cff90b79 Iustin Pop
      info("creating new local storage on %s for %s" %
3688 cff90b79 Iustin Pop
           (tgt_node, dev.iv_name))
3689 a9e0c397 Iustin Pop
      # since we *always* want to create this LV, we use the
3690 a9e0c397 Iustin Pop
      # _Create...OnPrimary (which forces the creation), even if we
3691 a9e0c397 Iustin Pop
      # are talking about the secondary node
3692 a9e0c397 Iustin Pop
      for new_lv in new_lvs:
3693 3f78eef2 Iustin Pop
        if not _CreateBlockDevOnPrimary(cfg, tgt_node, instance, new_lv,
3694 a9e0c397 Iustin Pop
                                        _GetInstanceInfoText(instance)):
3695 a9e0c397 Iustin Pop
          raise errors.OpExecError("Failed to create new LV named '%s' on"
3696 a9e0c397 Iustin Pop
                                   " node '%s'" %
3697 a9e0c397 Iustin Pop
                                   (new_lv.logical_id[1], tgt_node))
3698 a9e0c397 Iustin Pop
3699 cff90b79 Iustin Pop
    # Step: for each lv, detach+rename*2+attach
3700 5bfac263 Iustin Pop
    self.proc.LogStep(4, steps_total, "change drbd configuration")
3701 cff90b79 Iustin Pop
    for dev, old_lvs, new_lvs in iv_names.itervalues():
3702 cff90b79 Iustin Pop
      info("detaching %s drbd from local storage" % dev.iv_name)
3703 a9e0c397 Iustin Pop
      if not rpc.call_blockdev_removechildren(tgt_node, dev, old_lvs):
3704 a9e0c397 Iustin Pop
        raise errors.OpExecError("Can't detach drbd from local storage on node"
3705 a9e0c397 Iustin Pop
                                 " %s for device %s" % (tgt_node, dev.iv_name))
3706 cff90b79 Iustin Pop
      #dev.children = []
3707 cff90b79 Iustin Pop
      #cfg.Update(instance)
3708 a9e0c397 Iustin Pop
3709 a9e0c397 Iustin Pop
      # ok, we created the new LVs, so now we know we have the needed
3710 a9e0c397 Iustin Pop
      # storage; as such, we proceed on the target node to rename
3711 a9e0c397 Iustin Pop
      # old_lv to _old, and new_lv to old_lv; note that we rename LVs
3712 c99a3cc0 Manuel Franceschini
      # using the assumption that logical_id == physical_id (which in
3713 a9e0c397 Iustin Pop
      # turn is the unique_id on that node)
3714 cff90b79 Iustin Pop
3715 cff90b79 Iustin Pop
      # FIXME(iustin): use a better name for the replaced LVs
3716 a9e0c397 Iustin Pop
      temp_suffix = int(time.time())
3717 a9e0c397 Iustin Pop
      ren_fn = lambda d, suff: (d.physical_id[0],
3718 a9e0c397 Iustin Pop
                                d.physical_id[1] + "_replaced-%s" % suff)
3719 cff90b79 Iustin Pop
      # build the rename list based on what LVs exist on the node
3720 cff90b79 Iustin Pop
      rlist = []
3721 cff90b79 Iustin Pop
      for to_ren in old_lvs:
3722 cff90b79 Iustin Pop
        find_res = rpc.call_blockdev_find(tgt_node, to_ren)
3723 cff90b79 Iustin Pop
        if find_res is not None: # device exists
3724 cff90b79 Iustin Pop
          rlist.append((to_ren, ren_fn(to_ren, temp_suffix)))
3725 cff90b79 Iustin Pop
3726 cff90b79 Iustin Pop
      info("renaming the old LVs on the target node")
3727 a9e0c397 Iustin Pop
      if not rpc.call_blockdev_rename(tgt_node, rlist):
3728 cff90b79 Iustin Pop
        raise errors.OpExecError("Can't rename old LVs on node %s" % tgt_node)
3729 a9e0c397 Iustin Pop
      # now we rename the new LVs to the old LVs
3730 cff90b79 Iustin Pop
      info("renaming the new LVs on the target node")
3731 a9e0c397 Iustin Pop
      rlist = [(new, old.physical_id) for old, new in zip(old_lvs, new_lvs)]
3732 a9e0c397 Iustin Pop
      if not rpc.call_blockdev_rename(tgt_node, rlist):
3733 cff90b79 Iustin Pop
        raise errors.OpExecError("Can't rename new LVs on node %s" % tgt_node)
3734 cff90b79 Iustin Pop
3735 cff90b79 Iustin Pop
      for old, new in zip(old_lvs, new_lvs):
3736 cff90b79 Iustin Pop
        new.logical_id = old.logical_id
3737 cff90b79 Iustin Pop
        cfg.SetDiskID(new, tgt_node)
3738 a9e0c397 Iustin Pop
3739 cff90b79 Iustin Pop
      for disk in old_lvs:
3740 cff90b79 Iustin Pop
        disk.logical_id = ren_fn(disk, temp_suffix)
3741 cff90b79 Iustin Pop
        cfg.SetDiskID(disk, tgt_node)
3742 a9e0c397 Iustin Pop
3743 a9e0c397 Iustin Pop
      # now that the new lvs have the old name, we can add them to the device
3744 cff90b79 Iustin Pop
      info("adding new mirror component on %s" % tgt_node)
3745 a9e0c397 Iustin Pop
      if not rpc.call_blockdev_addchildren(tgt_node, dev, new_lvs):
3746 a9e0c397 Iustin Pop
        for new_lv in new_lvs:
3747 a9e0c397 Iustin Pop
          if not rpc.call_blockdev_remove(tgt_node, new_lv):
3748 79caa9ed Guido Trotter
            warning("Can't rollback device %s", hint="manually cleanup unused"
3749 cff90b79 Iustin Pop
                    " logical volumes")
3750 cff90b79 Iustin Pop
        raise errors.OpExecError("Can't add local storage to drbd")
3751 a9e0c397 Iustin Pop
3752 a9e0c397 Iustin Pop
      dev.children = new_lvs
3753 a9e0c397 Iustin Pop
      cfg.Update(instance)
3754 a9e0c397 Iustin Pop
3755 cff90b79 Iustin Pop
    # Step: wait for sync
3756 a9e0c397 Iustin Pop
3757 a9e0c397 Iustin Pop
    # this can fail as the old devices are degraded and _WaitForSync
3758 a9e0c397 Iustin Pop
    # does a combined result over all disks, so we don't check its
3759 a9e0c397 Iustin Pop
    # return value
3760 5bfac263 Iustin Pop
    self.proc.LogStep(5, steps_total, "sync devices")
3761 5bfac263 Iustin Pop
    _WaitForSync(cfg, instance, self.proc, unlock=True)
3762 a9e0c397 Iustin Pop
3763 a9e0c397 Iustin Pop
    # so check manually all the devices
3764 a9e0c397 Iustin Pop
    for name, (dev, old_lvs, new_lvs) in iv_names.iteritems():
3765 a9e0c397 Iustin Pop
      cfg.SetDiskID(dev, instance.primary_node)
3766 a9e0c397 Iustin Pop
      is_degr = rpc.call_blockdev_find(instance.primary_node, dev)[5]
3767 a9e0c397 Iustin Pop
      if is_degr:
3768 a9e0c397 Iustin Pop
        raise errors.OpExecError("DRBD device %s is degraded!" % name)
3769 a9e0c397 Iustin Pop
3770 cff90b79 Iustin Pop
    # Step: remove old storage
3771 5bfac263 Iustin Pop
    self.proc.LogStep(6, steps_total, "removing old storage")
3772 a9e0c397 Iustin Pop
    for name, (dev, old_lvs, new_lvs) in iv_names.iteritems():
3773 cff90b79 Iustin Pop
      info("remove logical volumes for %s" % name)
3774 a9e0c397 Iustin Pop
      for lv in old_lvs:
3775 a9e0c397 Iustin Pop
        cfg.SetDiskID(lv, tgt_node)
3776 a9e0c397 Iustin Pop
        if not rpc.call_blockdev_remove(tgt_node, lv):
3777 79caa9ed Guido Trotter
          warning("Can't remove old LV", hint="manually remove unused LVs")
3778 a9e0c397 Iustin Pop
          continue
3779 a9e0c397 Iustin Pop
3780 a9e0c397 Iustin Pop
  def _ExecD8Secondary(self, feedback_fn):
3781 a9e0c397 Iustin Pop
    """Replace the secondary node for drbd8.
3782 a9e0c397 Iustin Pop

3783 a9e0c397 Iustin Pop
    The algorithm for replace is quite complicated:
3784 a9e0c397 Iustin Pop
      - for all disks of the instance:
3785 a9e0c397 Iustin Pop
        - create new LVs on the new node with same names
3786 a9e0c397 Iustin Pop
        - shutdown the drbd device on the old secondary
3787 a9e0c397 Iustin Pop
        - disconnect the drbd network on the primary
3788 a9e0c397 Iustin Pop
        - create the drbd device on the new secondary
3789 a9e0c397 Iustin Pop
        - network attach the drbd on the primary, using an artifice:
3790 a9e0c397 Iustin Pop
          the drbd code for Attach() will connect to the network if it
3791 a9e0c397 Iustin Pop
          finds a device which is connected to the good local disks but
3792 a9e0c397 Iustin Pop
          not network enabled
3793 a9e0c397 Iustin Pop
      - wait for sync across all devices
3794 a9e0c397 Iustin Pop
      - remove all disks from the old secondary
3795 a9e0c397 Iustin Pop

3796 a9e0c397 Iustin Pop
    Failures are not very well handled.
3797 0834c866 Iustin Pop

3798 a9e0c397 Iustin Pop
    """
3799 0834c866 Iustin Pop
    steps_total = 6
3800 5bfac263 Iustin Pop
    warning, info = (self.proc.LogWarning, self.proc.LogInfo)
3801 a9e0c397 Iustin Pop
    instance = self.instance
3802 a9e0c397 Iustin Pop
    iv_names = {}
3803 a9e0c397 Iustin Pop
    vgname = self.cfg.GetVGName()
3804 a9e0c397 Iustin Pop
    # start of work
3805 a9e0c397 Iustin Pop
    cfg = self.cfg
3806 a9e0c397 Iustin Pop
    old_node = self.tgt_node
3807 a9e0c397 Iustin Pop
    new_node = self.new_node
3808 a9e0c397 Iustin Pop
    pri_node = instance.primary_node
3809 0834c866 Iustin Pop
3810 0834c866 Iustin Pop
    # Step: check device activation
3811 5bfac263 Iustin Pop
    self.proc.LogStep(1, steps_total, "check device existence")
3812 0834c866 Iustin Pop
    info("checking volume groups")
3813 0834c866 Iustin Pop
    my_vg = cfg.GetVGName()
3814 0834c866 Iustin Pop
    results = rpc.call_vg_list([pri_node, new_node])
3815 0834c866 Iustin Pop
    if not results:
3816 0834c866 Iustin Pop
      raise errors.OpExecError("Can't list volume groups on the nodes")
3817 0834c866 Iustin Pop
    for node in pri_node, new_node:
3818 0834c866 Iustin Pop
      res = results.get(node, False)
3819 0834c866 Iustin Pop
      if not res or my_vg not in res:
3820 0834c866 Iustin Pop
        raise errors.OpExecError("Volume group '%s' not found on %s" %
3821 0834c866 Iustin Pop
                                 (my_vg, node))
3822 0834c866 Iustin Pop
    for dev in instance.disks:
3823 0834c866 Iustin Pop
      if not dev.iv_name in self.op.disks:
3824 0834c866 Iustin Pop
        continue
3825 0834c866 Iustin Pop
      info("checking %s on %s" % (dev.iv_name, pri_node))
3826 0834c866 Iustin Pop
      cfg.SetDiskID(dev, pri_node)
3827 0834c866 Iustin Pop
      if not rpc.call_blockdev_find(pri_node, dev):
3828 0834c866 Iustin Pop
        raise errors.OpExecError("Can't find device %s on node %s" %
3829 0834c866 Iustin Pop
                                 (dev.iv_name, pri_node))
3830 0834c866 Iustin Pop
3831 0834c866 Iustin Pop
    # Step: check other node consistency
3832 5bfac263 Iustin Pop
    self.proc.LogStep(2, steps_total, "check peer consistency")
3833 0834c866 Iustin Pop
    for dev in instance.disks:
3834 0834c866 Iustin Pop
      if not dev.iv_name in self.op.disks:
3835 0834c866 Iustin Pop
        continue
3836 0834c866 Iustin Pop
      info("checking %s consistency on %s" % (dev.iv_name, pri_node))
3837 0834c866 Iustin Pop
      if not _CheckDiskConsistency(self.cfg, dev, pri_node, True, ldisk=True):
3838 0834c866 Iustin Pop
        raise errors.OpExecError("Primary node (%s) has degraded storage,"
3839 0834c866 Iustin Pop
                                 " unsafe to replace the secondary" %
3840 0834c866 Iustin Pop
                                 pri_node)
3841 0834c866 Iustin Pop
3842 0834c866 Iustin Pop
    # Step: create new storage
3843 5bfac263 Iustin Pop
    self.proc.LogStep(3, steps_total, "allocate new storage")
3844 a9e0c397 Iustin Pop
    for dev in instance.disks:
3845 a9e0c397 Iustin Pop
      size = dev.size
3846 0834c866 Iustin Pop
      info("adding new local storage on %s for %s" % (new_node, dev.iv_name))
3847 a9e0c397 Iustin Pop
      # since we *always* want to create this LV, we use the
3848 a9e0c397 Iustin Pop
      # _Create...OnPrimary (which forces the creation), even if we
3849 a9e0c397 Iustin Pop
      # are talking about the secondary node
3850 a9e0c397 Iustin Pop
      for new_lv in dev.children:
3851 3f78eef2 Iustin Pop
        if not _CreateBlockDevOnPrimary(cfg, new_node, instance, new_lv,
3852 a9e0c397 Iustin Pop
                                        _GetInstanceInfoText(instance)):
3853 a9e0c397 Iustin Pop
          raise errors.OpExecError("Failed to create new LV named '%s' on"
3854 a9e0c397 Iustin Pop
                                   " node '%s'" %
3855 a9e0c397 Iustin Pop
                                   (new_lv.logical_id[1], new_node))
3856 a9e0c397 Iustin Pop
3857 0834c866 Iustin Pop
      iv_names[dev.iv_name] = (dev, dev.children)
3858 0834c866 Iustin Pop
3859 5bfac263 Iustin Pop
    self.proc.LogStep(4, steps_total, "changing drbd configuration")
3860 0834c866 Iustin Pop
    for dev in instance.disks:
3861 0834c866 Iustin Pop
      size = dev.size
3862 0834c866 Iustin Pop
      info("activating a new drbd on %s for %s" % (new_node, dev.iv_name))
3863 a9e0c397 Iustin Pop
      # create new devices on new_node
3864 a9e0c397 Iustin Pop
      new_drbd = objects.Disk(dev_type=constants.LD_DRBD8,
3865 a9e0c397 Iustin Pop
                              logical_id=(pri_node, new_node,
3866 a9e0c397 Iustin Pop
                                          dev.logical_id[2]),
3867 a9e0c397 Iustin Pop
                              children=dev.children)
3868 3f78eef2 Iustin Pop
      if not _CreateBlockDevOnSecondary(cfg, new_node, instance,
3869 3f78eef2 Iustin Pop
                                        new_drbd, False,
3870 a9e0c397 Iustin Pop
                                      _GetInstanceInfoText(instance)):
3871 a9e0c397 Iustin Pop
        raise errors.OpExecError("Failed to create new DRBD on"
3872 a9e0c397 Iustin Pop
                                 " node '%s'" % new_node)
3873 a9e0c397 Iustin Pop
3874 0834c866 Iustin Pop
    for dev in instance.disks:
3875 a9e0c397 Iustin Pop
      # we have new devices, shutdown the drbd on the old secondary
3876 0834c866 Iustin Pop
      info("shutting down drbd for %s on old node" % dev.iv_name)
3877 a9e0c397 Iustin Pop
      cfg.SetDiskID(dev, old_node)
3878 a9e0c397 Iustin Pop
      if not rpc.call_blockdev_shutdown(old_node, dev):
3879 0834c866 Iustin Pop
        warning("Failed to shutdown drbd for %s on old node" % dev.iv_name,
3880 79caa9ed Guido Trotter
                hint="Please cleanup this device manually as soon as possible")
3881 a9e0c397 Iustin Pop
3882 642445d9 Iustin Pop
    info("detaching primary drbds from the network (=> standalone)")
3883 642445d9 Iustin Pop
    done = 0
3884 642445d9 Iustin Pop
    for dev in instance.disks:
3885 a9e0c397 Iustin Pop
      cfg.SetDiskID(dev, pri_node)
3886 642445d9 Iustin Pop
      # set the physical (unique in bdev terms) id to None, meaning
3887 642445d9 Iustin Pop
      # detach from network
3888 642445d9 Iustin Pop
      dev.physical_id = (None,) * len(dev.physical_id)
3889 642445d9 Iustin Pop
      # and 'find' the device, which will 'fix' it to match the
3890 642445d9 Iustin Pop
      # standalone state
3891 642445d9 Iustin Pop
      if rpc.call_blockdev_find(pri_node, dev):
3892 642445d9 Iustin Pop
        done += 1
3893 642445d9 Iustin Pop
      else:
3894 642445d9 Iustin Pop
        warning("Failed to detach drbd %s from network, unusual case" %
3895 642445d9 Iustin Pop
                dev.iv_name)
3896 642445d9 Iustin Pop
3897 642445d9 Iustin Pop
    if not done:
3898 642445d9 Iustin Pop
      # no detaches succeeded (very unlikely)
3899 642445d9 Iustin Pop
      raise errors.OpExecError("Can't detach at least one DRBD from old node")
3900 642445d9 Iustin Pop
3901 642445d9 Iustin Pop
    # if we managed to detach at least one, we update all the disks of
3902 642445d9 Iustin Pop
    # the instance to point to the new secondary
3903 642445d9 Iustin Pop
    info("updating instance configuration")
3904 642445d9 Iustin Pop
    for dev in instance.disks:
3905 642445d9 Iustin Pop
      dev.logical_id = (pri_node, new_node) + dev.logical_id[2:]
3906 a9e0c397 Iustin Pop
      cfg.SetDiskID(dev, pri_node)
3907 642445d9 Iustin Pop
    cfg.Update(instance)
3908 a9e0c397 Iustin Pop
3909 642445d9 Iustin Pop
    # and now perform the drbd attach
3910 642445d9 Iustin Pop
    info("attaching primary drbds to new secondary (standalone => connected)")
3911 642445d9 Iustin Pop
    failures = []
3912 642445d9 Iustin Pop
    for dev in instance.disks:
3913 642445d9 Iustin Pop
      info("attaching primary drbd for %s to new secondary node" % dev.iv_name)
3914 642445d9 Iustin Pop
      # since the attach is smart, it's enough to 'find' the device,
3915 642445d9 Iustin Pop
      # it will automatically activate the network, if the physical_id
3916 642445d9 Iustin Pop
      # is correct
3917 642445d9 Iustin Pop
      cfg.SetDiskID(dev, pri_node)
3918 642445d9 Iustin Pop
      if not rpc.call_blockdev_find(pri_node, dev):
3919 642445d9 Iustin Pop
        warning("can't attach drbd %s to new secondary!" % dev.iv_name,
3920 642445d9 Iustin Pop
                "please do a gnt-instance info to see the status of disks")
3921 a9e0c397 Iustin Pop
3922 a9e0c397 Iustin Pop
    # this can fail as the old devices are degraded and _WaitForSync
3923 a9e0c397 Iustin Pop
    # does a combined result over all disks, so we don't check its
3924 a9e0c397 Iustin Pop
    # return value
3925 5bfac263 Iustin Pop
    self.proc.LogStep(5, steps_total, "sync devices")
3926 5bfac263 Iustin Pop
    _WaitForSync(cfg, instance, self.proc, unlock=True)
3927 a9e0c397 Iustin Pop
3928 a9e0c397 Iustin Pop
    # so check manually all the devices
3929 a9e0c397 Iustin Pop
    for name, (dev, old_lvs) in iv_names.iteritems():
3930 a9e0c397 Iustin Pop
      cfg.SetDiskID(dev, pri_node)
3931 a9e0c397 Iustin Pop
      is_degr = rpc.call_blockdev_find(pri_node, dev)[5]
3932 a9e0c397 Iustin Pop
      if is_degr:
3933 a9e0c397 Iustin Pop
        raise errors.OpExecError("DRBD device %s is degraded!" % name)
3934 a9e0c397 Iustin Pop
3935 5bfac263 Iustin Pop
    self.proc.LogStep(6, steps_total, "removing old storage")
3936 a9e0c397 Iustin Pop
    for name, (dev, old_lvs) in iv_names.iteritems():
3937 0834c866 Iustin Pop
      info("remove logical volumes for %s" % name)
3938 a9e0c397 Iustin Pop
      for lv in old_lvs:
3939 a9e0c397 Iustin Pop
        cfg.SetDiskID(lv, old_node)
3940 a9e0c397 Iustin Pop
        if not rpc.call_blockdev_remove(old_node, lv):
3941 0834c866 Iustin Pop
          warning("Can't remove LV on old secondary",
3942 79caa9ed Guido Trotter
                  hint="Cleanup stale volumes by hand")
3943 a9e0c397 Iustin Pop
3944 a9e0c397 Iustin Pop
  def Exec(self, feedback_fn):
3945 a9e0c397 Iustin Pop
    """Execute disk replacement.
3946 a9e0c397 Iustin Pop

3947 a9e0c397 Iustin Pop
    This dispatches the disk replacement to the appropriate handler.
3948 a9e0c397 Iustin Pop

3949 a9e0c397 Iustin Pop
    """
3950 a9e0c397 Iustin Pop
    instance = self.instance
3951 22985314 Guido Trotter
3952 22985314 Guido Trotter
    # Activate the instance disks if we're replacing them on a down instance
3953 22985314 Guido Trotter
    if instance.status == "down":
3954 22985314 Guido Trotter
      op = opcodes.OpActivateInstanceDisks(instance_name=instance.name)
3955 22985314 Guido Trotter
      self.proc.ChainOpCode(op)
3956 22985314 Guido Trotter
3957 abdf0113 Iustin Pop
    if instance.disk_template == constants.DT_DRBD8:
3958 a9e0c397 Iustin Pop
      if self.op.remote_node is None:
3959 a9e0c397 Iustin Pop
        fn = self._ExecD8DiskOnly
3960 a9e0c397 Iustin Pop
      else:
3961 a9e0c397 Iustin Pop
        fn = self._ExecD8Secondary
3962 a9e0c397 Iustin Pop
    else:
3963 a9e0c397 Iustin Pop
      raise errors.ProgrammerError("Unhandled disk replacement case")
3964 22985314 Guido Trotter
3965 22985314 Guido Trotter
    ret = fn(feedback_fn)
3966 22985314 Guido Trotter
3967 22985314 Guido Trotter
    # Deactivate the instance disks if we're replacing them on a down instance
3968 22985314 Guido Trotter
    if instance.status == "down":
3969 22985314 Guido Trotter
      op = opcodes.OpDeactivateInstanceDisks(instance_name=instance.name)
3970 22985314 Guido Trotter
      self.proc.ChainOpCode(op)
3971 22985314 Guido Trotter
3972 22985314 Guido Trotter
    return ret
3973 a9e0c397 Iustin Pop
3974 a8083063 Iustin Pop
3975 8729e0d7 Iustin Pop
class LUGrowDisk(LogicalUnit):
3976 8729e0d7 Iustin Pop
  """Grow a disk of an instance.
3977 8729e0d7 Iustin Pop

3978 8729e0d7 Iustin Pop
  """
3979 8729e0d7 Iustin Pop
  HPATH = "disk-grow"
3980 8729e0d7 Iustin Pop
  HTYPE = constants.HTYPE_INSTANCE
3981 8729e0d7 Iustin Pop
  _OP_REQP = ["instance_name", "disk", "amount"]
3982 8729e0d7 Iustin Pop
3983 8729e0d7 Iustin Pop
  def BuildHooksEnv(self):
3984 8729e0d7 Iustin Pop
    """Build hooks env.
3985 8729e0d7 Iustin Pop

3986 8729e0d7 Iustin Pop
    This runs on the master, the primary and all the secondaries.
3987 8729e0d7 Iustin Pop

3988 8729e0d7 Iustin Pop
    """
3989 8729e0d7 Iustin Pop
    env = {
3990 8729e0d7 Iustin Pop
      "DISK": self.op.disk,
3991 8729e0d7 Iustin Pop
      "AMOUNT": self.op.amount,
3992 8729e0d7 Iustin Pop
      }
3993 8729e0d7 Iustin Pop
    env.update(_BuildInstanceHookEnvByObject(self.instance))
3994 8729e0d7 Iustin Pop
    nl = [
3995 8729e0d7 Iustin Pop
      self.sstore.GetMasterNode(),
3996 8729e0d7 Iustin Pop
      self.instance.primary_node,
3997 8729e0d7 Iustin Pop
      ]
3998 8729e0d7 Iustin Pop
    return env, nl, nl
3999 8729e0d7 Iustin Pop
4000 8729e0d7 Iustin Pop
  def CheckPrereq(self):
4001 8729e0d7 Iustin Pop
    """Check prerequisites.
4002 8729e0d7 Iustin Pop

4003 8729e0d7 Iustin Pop
    This checks that the instance is in the cluster.
4004 8729e0d7 Iustin Pop

4005 8729e0d7 Iustin Pop
    """
4006 8729e0d7 Iustin Pop
    instance = self.cfg.GetInstanceInfo(
4007 8729e0d7 Iustin Pop
      self.cfg.ExpandInstanceName(self.op.instance_name))
4008 8729e0d7 Iustin Pop
    if instance is None:
4009 8729e0d7 Iustin Pop
      raise errors.OpPrereqError("Instance '%s' not known" %
4010 8729e0d7 Iustin Pop
                                 self.op.instance_name)
4011 8729e0d7 Iustin Pop
    self.instance = instance
4012 8729e0d7 Iustin Pop
    self.op.instance_name = instance.name
4013 8729e0d7 Iustin Pop
4014 8729e0d7 Iustin Pop
    if instance.disk_template not in (constants.DT_PLAIN, constants.DT_DRBD8):
4015 8729e0d7 Iustin Pop
      raise errors.OpPrereqError("Instance's disk layout does not support"
4016 8729e0d7 Iustin Pop
                                 " growing.")
4017 8729e0d7 Iustin Pop
4018 8729e0d7 Iustin Pop
    if instance.FindDisk(self.op.disk) is None:
4019 8729e0d7 Iustin Pop
      raise errors.OpPrereqError("Disk '%s' not found for instance '%s'" %
4020 c7cdfc90 Iustin Pop
                                 (self.op.disk, instance.name))
4021 8729e0d7 Iustin Pop
4022 8729e0d7 Iustin Pop
    nodenames = [instance.primary_node] + list(instance.secondary_nodes)
4023 8729e0d7 Iustin Pop
    nodeinfo = rpc.call_node_info(nodenames, self.cfg.GetVGName())
4024 8729e0d7 Iustin Pop
    for node in nodenames:
4025 8729e0d7 Iustin Pop
      info = nodeinfo.get(node, None)
4026 8729e0d7 Iustin Pop
      if not info:
4027 8729e0d7 Iustin Pop
        raise errors.OpPrereqError("Cannot get current information"
4028 8729e0d7 Iustin Pop
                                   " from node '%s'" % node)
4029 8729e0d7 Iustin Pop
      vg_free = info.get('vg_free', None)
4030 8729e0d7 Iustin Pop
      if not isinstance(vg_free, int):
4031 8729e0d7 Iustin Pop
        raise errors.OpPrereqError("Can't compute free disk space on"
4032 8729e0d7 Iustin Pop
                                   " node %s" % node)
4033 8729e0d7 Iustin Pop
      if self.op.amount > info['vg_free']:
4034 8729e0d7 Iustin Pop
        raise errors.OpPrereqError("Not enough disk space on target node %s:"
4035 8729e0d7 Iustin Pop
                                   " %d MiB available, %d MiB required" %
4036 8729e0d7 Iustin Pop
                                   (node, info['vg_free'], self.op.amount))
4037 8729e0d7 Iustin Pop
4038 8729e0d7 Iustin Pop
  def Exec(self, feedback_fn):
4039 8729e0d7 Iustin Pop
    """Execute disk grow.
4040 8729e0d7 Iustin Pop

4041 8729e0d7 Iustin Pop
    """
4042 8729e0d7 Iustin Pop
    instance = self.instance
4043 8729e0d7 Iustin Pop
    disk = instance.FindDisk(self.op.disk)
4044 8729e0d7 Iustin Pop
    for node in (instance.secondary_nodes + (instance.primary_node,)):
4045 8729e0d7 Iustin Pop
      self.cfg.SetDiskID(disk, node)
4046 8729e0d7 Iustin Pop
      result = rpc.call_blockdev_grow(node, disk, self.op.amount)
4047 8729e0d7 Iustin Pop
      if not result or not isinstance(result, tuple) or len(result) != 2:
4048 8729e0d7 Iustin Pop
        raise errors.OpExecError("grow request failed to node %s" % node)
4049 8729e0d7 Iustin Pop
      elif not result[0]:
4050 8729e0d7 Iustin Pop
        raise errors.OpExecError("grow request failed to node %s: %s" %
4051 8729e0d7 Iustin Pop
                                 (node, result[1]))
4052 8729e0d7 Iustin Pop
    disk.RecordGrow(self.op.amount)
4053 8729e0d7 Iustin Pop
    self.cfg.Update(instance)
4054 8729e0d7 Iustin Pop
    return
4055 8729e0d7 Iustin Pop
4056 8729e0d7 Iustin Pop
4057 a8083063 Iustin Pop
class LUQueryInstanceData(NoHooksLU):
  """Query runtime instance data.

  Builds, for each wanted instance, a dictionary of configuration data
  (nodes, nics, memory, vcpus, ...) plus live status gathered over RPC
  (run state, per-disk device status, hypervisor-specific settings).

  """
  _OP_REQP = ["instances"]

  def CheckPrereq(self):
    """Check prerequisites.

    This only checks the optional instance list against the existing names.

    """
    if not isinstance(self.op.instances, list):
      raise errors.OpPrereqError("Invalid argument type 'instances'")
    if self.op.instances:
      self.wanted_instances = []
      names = self.op.instances
      for name in names:
        # resolve possibly-abbreviated names before the config lookup
        instance = self.cfg.GetInstanceInfo(self.cfg.ExpandInstanceName(name))
        if instance is None:
          raise errors.OpPrereqError("No such instance name '%s'" % name)
        self.wanted_instances.append(instance)
    else:
      # an empty/absent list means "query all instances"
      self.wanted_instances = [self.cfg.GetInstanceInfo(name) for name
                               in self.cfg.GetInstanceList()]
    return

  def _ComputeDiskStatus(self, instance, snode, dev):
    """Compute block device status.

    Queries the device on the primary node and (if any) the secondary
    node via RPC, and recurses into child devices.

    """
    self.cfg.SetDiskID(dev, instance.primary_node)
    dev_pstatus = rpc.call_blockdev_find(instance.primary_node, dev)
    if dev.dev_type in constants.LDS_DRBD:
      # we change the snode then (otherwise we use the one passed in);
      # for DRBD devices the peer node is encoded in logical_id
      if dev.logical_id[0] == instance.primary_node:
        snode = dev.logical_id[1]
      else:
        snode = dev.logical_id[0]

    if snode:
      self.cfg.SetDiskID(dev, snode)
      dev_sstatus = rpc.call_blockdev_find(snode, dev)
    else:
      dev_sstatus = None

    if dev.children:
      # children inherit the (possibly updated) secondary node
      dev_children = [self._ComputeDiskStatus(instance, snode, child)
                      for child in dev.children]
    else:
      dev_children = []

    data = {
      "iv_name": dev.iv_name,
      "dev_type": dev.dev_type,
      "logical_id": dev.logical_id,
      "physical_id": dev.physical_id,
      "pstatus": dev_pstatus,
      "sstatus": dev_sstatus,
      "children": dev_children,
      }

    return data

  def Exec(self, feedback_fn):
    """Gather and return data"""
    result = {}
    for instance in self.wanted_instances:
      # live state as reported by the primary node
      remote_info = rpc.call_instance_info(instance.primary_node,
                                                instance.name)
      if remote_info and "state" in remote_info:
        remote_state = "up"
      else:
        remote_state = "down"
      # desired state as recorded in the configuration
      if instance.status == "down":
        config_state = "down"
      else:
        config_state = "up"

      disks = [self._ComputeDiskStatus(instance, None, device)
               for device in instance.disks]

      idict = {
        "name": instance.name,
        "config_state": config_state,
        "run_state": remote_state,
        "pnode": instance.primary_node,
        "snodes": instance.secondary_nodes,
        "os": instance.os,
        "memory": instance.memory,
        "nics": [(nic.mac, nic.ip, nic.bridge) for nic in instance.nics],
        "disks": disks,
        "vcpus": instance.vcpus,
        }

      # hypervisor-specific fields
      htkind = self.sstore.GetHypervisorType()
      if htkind == constants.HT_XEN_PVM30:
        idict["kernel_path"] = instance.kernel_path
        idict["initrd_path"] = instance.initrd_path

      if htkind == constants.HT_XEN_HVM31:
        idict["hvm_boot_order"] = instance.hvm_boot_order
        idict["hvm_acpi"] = instance.hvm_acpi
        idict["hvm_pae"] = instance.hvm_pae
        idict["hvm_cdrom_image_path"] = instance.hvm_cdrom_image_path
        idict["hvm_nic_type"] = instance.hvm_nic_type
        idict["hvm_disk_type"] = instance.hvm_disk_type

      if htkind in constants.HTS_REQ_PORT:
        # normalize the bind address, then render the console endpoint:
        # global bind -> reachable via the primary node's name,
        # localhost bind -> only on the node itself,
        # explicit address -> use it verbatim
        if instance.vnc_bind_address is None:
          vnc_bind_address = constants.VNC_DEFAULT_BIND_ADDRESS
        else:
          vnc_bind_address = instance.vnc_bind_address
        if instance.network_port is None:
          vnc_console_port = None
        elif vnc_bind_address == constants.BIND_ADDRESS_GLOBAL:
          vnc_console_port = "%s:%s" % (instance.primary_node,
                                       instance.network_port)
        elif vnc_bind_address == constants.LOCALHOST_IP_ADDRESS:
          vnc_console_port = "%s:%s on node %s" % (vnc_bind_address,
                                                   instance.network_port,
                                                   instance.primary_node)
        else:
          vnc_console_port = "%s:%s" % (instance.vnc_bind_address,
                                        instance.network_port)
        idict["vnc_console_port"] = vnc_console_port
        idict["vnc_bind_address"] = vnc_bind_address
        idict["network_port"] = instance.network_port

      result[instance.name] = idict

    return result
class LUSetInstanceParams(LogicalUnit):
  """Modifies an instances's parameters.

  All changed parameters are recorded in the configuration; they take
  effect at the next restart of the instance (see Exec's docstring).

  """
  HPATH = "instance-modify"
  HTYPE = constants.HTYPE_INSTANCE
  _OP_REQP = ["instance_name"]
  REQ_BGL = False

  def ExpandNames(self):
    self._ExpandAndLockInstance()

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on the master, primary and secondaries.

    """
    # only the parameters actually being changed are pushed as overrides
    args = dict()
    if self.mem:
      args['memory'] = self.mem
    if self.vcpus:
      args['vcpus'] = self.vcpus
    if self.do_ip or self.do_bridge or self.mac:
      # a nic override must be complete, so fill in unchanged fields
      # from the instance's first (and only modifiable) nic
      if self.do_ip:
        ip = self.ip
      else:
        ip = self.instance.nics[0].ip
      if self.bridge:
        bridge = self.bridge
      else:
        bridge = self.instance.nics[0].bridge
      if self.mac:
        mac = self.mac
      else:
        mac = self.instance.nics[0].mac
      args['nics'] = [(ip, bridge, mac)]
    env = _BuildInstanceHookEnvByObject(self.instance, override=args)
    nl = [self.sstore.GetMasterNode(),
          self.instance.primary_node] + list(self.instance.secondary_nodes)
    return env, nl, nl

  def CheckPrereq(self):
    """Check prerequisites.

    This only checks the instance list against the existing names.

    """
    # FIXME: all the parameters could be checked before, in ExpandNames, or in
    # a separate CheckArguments function, if we implement one, so the operation
    # can be aborted without waiting for any lock, should it have an error...
    #
    # mirror every (optional) opcode field onto self; missing -> None
    self.mem = getattr(self.op, "mem", None)
    self.vcpus = getattr(self.op, "vcpus", None)
    self.ip = getattr(self.op, "ip", None)
    self.mac = getattr(self.op, "mac", None)
    self.bridge = getattr(self.op, "bridge", None)
    self.kernel_path = getattr(self.op, "kernel_path", None)
    self.initrd_path = getattr(self.op, "initrd_path", None)
    self.hvm_boot_order = getattr(self.op, "hvm_boot_order", None)
    self.hvm_acpi = getattr(self.op, "hvm_acpi", None)
    self.hvm_pae = getattr(self.op, "hvm_pae", None)
    self.hvm_nic_type = getattr(self.op, "hvm_nic_type", None)
    self.hvm_disk_type = getattr(self.op, "hvm_disk_type", None)
    self.hvm_cdrom_image_path = getattr(self.op, "hvm_cdrom_image_path", None)
    self.vnc_bind_address = getattr(self.op, "vnc_bind_address", None)
    self.force = getattr(self.op, "force", None)
    # 'force' is deliberately not part of this list: it is a modifier,
    # not a change by itself
    all_parms = [self.mem, self.vcpus, self.ip, self.bridge, self.mac,
                 self.kernel_path, self.initrd_path, self.hvm_boot_order,
                 self.hvm_acpi, self.hvm_pae, self.hvm_cdrom_image_path,
                 self.vnc_bind_address, self.hvm_nic_type, self.hvm_disk_type]
    if all_parms.count(None) == len(all_parms):
      raise errors.OpPrereqError("No changes submitted")
    # coerce numeric parameters, rejecting garbage early
    if self.mem is not None:
      try:
        self.mem = int(self.mem)
      except ValueError, err:
        raise errors.OpPrereqError("Invalid memory size: %s" % str(err))
    if self.vcpus is not None:
      try:
        self.vcpus = int(self.vcpus)
      except ValueError, err:
        raise errors.OpPrereqError("Invalid vcpus number: %s" % str(err))
    if self.ip is not None:
      self.do_ip = True
      # the literal string "none" means "remove the IP"
      if self.ip.lower() == "none":
        self.ip = None
      else:
        if not utils.IsValidIP(self.ip):
          raise errors.OpPrereqError("Invalid IP address '%s'." % self.ip)
    else:
      self.do_ip = False
    self.do_bridge = (self.bridge is not None)
    if self.mac is not None:
      if self.cfg.IsMacInUse(self.mac):
        raise errors.OpPrereqError('MAC address %s already in use in cluster' %
                                   self.mac)
      if not utils.IsValidMac(self.mac):
        raise errors.OpPrereqError('Invalid MAC address %s' % self.mac)

    if self.kernel_path is not None:
      self.do_kernel_path = True
      if self.kernel_path == constants.VALUE_NONE:
        raise errors.OpPrereqError("Can't set instance to no kernel")

      if self.kernel_path != constants.VALUE_DEFAULT:
        if not os.path.isabs(self.kernel_path):
          raise errors.OpPrereqError("The kernel path must be an absolute"
                                    " filename")
    else:
      self.do_kernel_path = False

    if self.initrd_path is not None:
      self.do_initrd_path = True
      if self.initrd_path not in (constants.VALUE_NONE,
                                  constants.VALUE_DEFAULT):
        if not os.path.isabs(self.initrd_path):
          raise errors.OpPrereqError("The initrd path must be an absolute"
                                    " filename")
    else:
      self.do_initrd_path = False

    # boot order verification
    if self.hvm_boot_order is not None:
      if self.hvm_boot_order != constants.VALUE_DEFAULT:
        if len(self.hvm_boot_order.strip("acdn")) != 0:
          raise errors.OpPrereqError("invalid boot order specified,"
                                     " must be one or more of [acdn]"
                                     " or 'default'")

    # hvm_cdrom_image_path verification
    # NOTE(review): os.path.isfile runs on the node executing this LU
    # (the master), so the image apparently must exist there — confirm
    if self.op.hvm_cdrom_image_path is not None:
      if not (os.path.isabs(self.op.hvm_cdrom_image_path) or
              self.op.hvm_cdrom_image_path.lower() == "none"):
        raise errors.OpPrereqError("The path to the HVM CDROM image must"
                                   " be an absolute path or None, not %s" %
                                   self.op.hvm_cdrom_image_path)
      if not (os.path.isfile(self.op.hvm_cdrom_image_path) or
              self.op.hvm_cdrom_image_path.lower() == "none"):
        raise errors.OpPrereqError("The HVM CDROM image must either be a"
                                   " regular file or a symlink pointing to"
                                   " an existing regular file, not %s" %
                                   self.op.hvm_cdrom_image_path)

    # vnc_bind_address verification
    if self.op.vnc_bind_address is not None:
      if not utils.IsValidIP(self.op.vnc_bind_address):
        raise errors.OpPrereqError("given VNC bind address '%s' doesn't look"
                                   " like a valid IP address" %
                                   self.op.vnc_bind_address)

    instance = self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
    assert self.instance is not None, \
      "Cannot retrieve locked instance %s" % self.op.instance_name
    self.warn = []
    # when growing memory without --force, verify the nodes can hold it
    if self.mem is not None and not self.force:
      pnode = self.instance.primary_node
      nodelist = [pnode]
      nodelist.extend(instance.secondary_nodes)
      instance_info = rpc.call_instance_info(pnode, instance.name)
      nodeinfo = rpc.call_node_info(nodelist, self.cfg.GetVGName())

      if pnode not in nodeinfo or not isinstance(nodeinfo[pnode], dict):
        # Assume the primary node is unreachable and go ahead
        self.warn.append("Can't get info from primary node %s" % pnode)
      else:
        if instance_info:
          current_mem = instance_info['memory']
        else:
          # Assume instance not running
          # (there is a slight race condition here, but it's not very probable,
          # and we have no other way to check)
          current_mem = 0
        miss_mem = self.mem - current_mem - nodeinfo[pnode]['memory_free']
        if miss_mem > 0:
          raise errors.OpPrereqError("This change will prevent the instance"
                                     " from starting, due to %d MB of memory"
                                     " missing on its primary node" % miss_mem)

      # secondary-node shortfalls only warn (failover would fail, but
      # the change itself is still possible)
      for node in instance.secondary_nodes:
        if node not in nodeinfo or not isinstance(nodeinfo[node], dict):
          self.warn.append("Can't get info from secondary node %s" % node)
        elif self.mem > nodeinfo[node]['memory_free']:
          self.warn.append("Not enough memory to failover instance to secondary"
                           " node %s" % node)

    # Xen HVM device type checks
    if self.sstore.GetHypervisorType() == constants.HT_XEN_HVM31:
      if self.op.hvm_nic_type is not None:
        if self.op.hvm_nic_type not in constants.HT_HVM_VALID_NIC_TYPES:
          raise errors.OpPrereqError("Invalid NIC type %s specified for Xen"
                                     " HVM  hypervisor" % self.op.hvm_nic_type)
      if self.op.hvm_disk_type is not None:
        if self.op.hvm_disk_type not in constants.HT_HVM_VALID_DISK_TYPES:
          raise errors.OpPrereqError("Invalid disk type %s specified for Xen"
                                     " HVM hypervisor" % self.op.hvm_disk_type)

    return

  def Exec(self, feedback_fn):
    """Modifies an instance.

    All parameters take effect only at the next restart of the instance.
    """
    # Process here the warnings from CheckPrereq, as we don't have a
    # feedback_fn there.
    for warn in self.warn:
      feedback_fn("WARNING: %s" % warn)

    # note: most fields use plain truthiness ("if self.mem") so falsy
    # values count as "no change"; only the hvm_acpi/hvm_pae/nic/disk
    # type fields distinguish an explicit False/0 from "not given"
    result = []
    instance = self.instance
    if self.mem:
      instance.memory = self.mem
      result.append(("mem", self.mem))
    if self.vcpus:
      instance.vcpus = self.vcpus
      result.append(("vcpus",  self.vcpus))
    if self.do_ip:
      instance.nics[0].ip = self.ip
      result.append(("ip", self.ip))
    if self.bridge:
      instance.nics[0].bridge = self.bridge
      result.append(("bridge", self.bridge))
    if self.mac:
      instance.nics[0].mac = self.mac
      result.append(("mac", self.mac))
    if self.do_kernel_path:
      instance.kernel_path = self.kernel_path
      result.append(("kernel_path", self.kernel_path))
    if self.do_initrd_path:
      instance.initrd_path = self.initrd_path
      result.append(("initrd_path", self.initrd_path))
    if self.hvm_boot_order:
      # "default" is stored as None in the configuration
      if self.hvm_boot_order == constants.VALUE_DEFAULT:
        instance.hvm_boot_order = None
      else:
        instance.hvm_boot_order = self.hvm_boot_order
      result.append(("hvm_boot_order", self.hvm_boot_order))
    if self.hvm_acpi is not None:
      instance.hvm_acpi = self.hvm_acpi
      result.append(("hvm_acpi", self.hvm_acpi))
    if self.hvm_pae is not None:
      instance.hvm_pae = self.hvm_pae
      result.append(("hvm_pae", self.hvm_pae))
    if self.hvm_nic_type is not None:
      instance.hvm_nic_type = self.hvm_nic_type
      result.append(("hvm_nic_type", self.hvm_nic_type))
    if self.hvm_disk_type is not None:
      instance.hvm_disk_type = self.hvm_disk_type
      result.append(("hvm_disk_type", self.hvm_disk_type))
    if self.hvm_cdrom_image_path:
      # the literal value "none" means "remove the CDROM image"
      if self.hvm_cdrom_image_path == constants.VALUE_NONE:
        instance.hvm_cdrom_image_path = None
      else:
        instance.hvm_cdrom_image_path = self.hvm_cdrom_image_path
      result.append(("hvm_cdrom_image_path", self.hvm_cdrom_image_path))
    if self.vnc_bind_address:
      instance.vnc_bind_address = self.vnc_bind_address
      result.append(("vnc_bind_address", self.vnc_bind_address))

    self.cfg.Update(instance)

    return result
4456 a8083063 Iustin Pop
class LUQueryExports(NoHooksLU):
  """Query the exports list

  """
  _OP_REQP = []

  def CheckPrereq(self):
    """Check that the nodelist contains only existing nodes.

    """
    # an absent/empty node list is expanded by _GetWantedNodes
    wanted = getattr(self.op, "nodes", None)
    self.nodes = _GetWantedNodes(self, wanted)

  def Exec(self, feedback_fn):
    """Compute the list of all the exported system images.

    Returns:
      a dictionary with the structure node->(export-list)
      where export-list is a list of the instances exported on
      that node.

    """
    # the backend call already yields the node->exports mapping
    return rpc.call_export_list(self.nodes)
class LUExportInstance(LogicalUnit):
  """Export an instance to an image in the cluster.

  The export is made from an LVM snapshot of the instance's "sda" disk
  taken on the primary node (optionally after shutting the instance
  down) and copied to the target node; older exports of the same
  instance on other nodes are then removed.

  """
  HPATH = "instance-export"
  HTYPE = constants.HTYPE_INSTANCE
  _OP_REQP = ["instance_name", "target_node", "shutdown"]

  def BuildHooksEnv(self):
    """Build hooks env.

    This will run on the master, primary node and target node.

    """
    env = {
      "EXPORT_NODE": self.op.target_node,
      "EXPORT_DO_SHUTDOWN": self.op.shutdown,
      }
    env.update(_BuildInstanceHookEnvByObject(self.instance))
    nl = [self.sstore.GetMasterNode(), self.instance.primary_node,
          self.op.target_node]
    return env, nl, nl

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance and node names are valid.

    """
    instance_name = self.cfg.ExpandInstanceName(self.op.instance_name)
    self.instance = self.cfg.GetInstanceInfo(instance_name)
    if self.instance is None:
      raise errors.OpPrereqError("Instance '%s' not found" %
                                 self.op.instance_name)

    # node verification
    dst_node_short = self.cfg.ExpandNodeName(self.op.target_node)
    self.dst_node = self.cfg.GetNodeInfo(dst_node_short)

    if self.dst_node is None:
      raise errors.OpPrereqError("Destination node '%s' is unknown." %
                                 self.op.target_node)
    # canonicalize the target node name for later use (hooks, Exec)
    self.op.target_node = self.dst_node.name

    # instance disk type verification
    for disk in self.instance.disks:
      if disk.dev_type == constants.LD_FILE:
        raise errors.OpPrereqError("Export not supported for instances with"
                                   " file-based disks")

  def Exec(self, feedback_fn):
    """Export an instance to an image in the cluster.

    """
    instance = self.instance
    dst_node = self.dst_node
    src_node = instance.primary_node
    if self.op.shutdown:
      # shutdown the instance, but not the disks
      if not rpc.call_instance_shutdown(src_node, instance):
        raise errors.OpExecError("Could not shutdown instance %s on node %s" %
                                 (instance.name, src_node))

    vgname = self.cfg.GetVGName()

    snap_disks = []

    try:
      # only the "sda" disk is snapshotted/exported
      for disk in instance.disks:
        if disk.iv_name == "sda":
          # new_dev_name will be a snapshot of an lvm leaf of the one we passed
          new_dev_name = rpc.call_blockdev_snapshot(src_node, disk)

          if not new_dev_name:
            # snapshot failure is logged but does not abort the export
            logger.Error("could not snapshot block device %s on node %s" %
                         (disk.logical_id[1], src_node))
          else:
            new_dev = objects.Disk(dev_type=constants.LD_LV, size=disk.size,
                                      logical_id=(vgname, new_dev_name),
                                      physical_id=(vgname, new_dev_name),
                                      iv_name=disk.iv_name)
            snap_disks.append(new_dev)

    finally:
      # restart the instance even if snapshotting failed, but only if
      # we shut it down and the configuration says it should be up
      if self.op.shutdown and instance.status == "up":
        if not rpc.call_instance_start(src_node, instance, None):
          _ShutdownInstanceDisks(instance, self.cfg)
          raise errors.OpExecError("Could not start instance")

    # TODO: check for size

    # copy each snapshot to the target node, then drop the snapshot
    for dev in snap_disks:
      if not rpc.call_snapshot_export(src_node, dev, dst_node.name, instance):
        logger.Error("could not export block device %s from node %s to node %s"
                     % (dev.logical_id[1], src_node, dst_node.name))
      if not rpc.call_blockdev_remove(src_node, dev):
        logger.Error("could not remove snapshot block device %s from node %s" %
                     (dev.logical_id[1], src_node))

    if not rpc.call_finalize_export(dst_node.name, instance, snap_disks):
      logger.Error("could not finalize export for instance %s on node %s" %
                   (instance.name, dst_node.name))

    nodelist = self.cfg.GetNodeList()
    nodelist.remove(dst_node.name)

    # on one-node clusters nodelist will be empty after the removal
    # if we proceed the backup would be removed because OpQueryExports
    # substitutes an empty list with the full cluster node list.
    if nodelist:
      exportlist = rpc.call_export_list(nodelist)
      for node in exportlist:
        if instance.name in exportlist[node]:
          if not rpc.call_export_remove(node, instance.name):
            logger.Error("could not remove older export for instance %s"
                         " on node %s" % (instance.name, node))
class LURemoveExport(NoHooksLU):
  """Remove exports related to the named instance.

  """
  _OP_REQP = ["instance_name"]

  def CheckPrereq(self):
    """Check prerequisites.
    """
    pass

  def Exec(self, feedback_fn):
    """Remove any export.

    """
    # Try to resolve the name against the configuration first; if the
    # instance is already gone, fall back to the raw opcode name, which
    # can only match an export if it is a FQDN.
    expanded_name = self.cfg.ExpandInstanceName(self.op.instance_name)
    fqdn_warn = not expanded_name
    iname = expanded_name or self.op.instance_name

    found = False
    node_exports = rpc.call_export_list(self.cfg.GetNodeList())
    for node in node_exports:
      if iname not in node_exports[node]:
        continue
      found = True
      if not rpc.call_export_remove(node, iname):
        logger.Error("could not remove export for instance %s"
                     " on node %s" % (iname, node))

    if fqdn_warn and not found:
      feedback_fn("Export not found. If trying to remove an export belonging"
                  " to a deleted instance please use its Fully Qualified"
                  " Domain Name.")
4634 9ac99fda Guido Trotter
4635 9ac99fda Guido Trotter
4636 5c947f38 Iustin Pop
class TagsLU(NoHooksLU):
  """Generic tags LU.

  This is an abstract class which is the parent of all the other tags LUs.

  """
  def CheckPrereq(self):
    """Check prerequisites.

    """
    # Resolve self.target (the taggable object) from the opcode's kind
    # and name; node and instance names are canonicalized in the opcode.
    kind = self.op.kind
    if kind == constants.TAG_CLUSTER:
      self.target = self.cfg.GetClusterInfo()
    elif kind == constants.TAG_NODE:
      expanded = self.cfg.ExpandNodeName(self.op.name)
      if expanded is None:
        raise errors.OpPrereqError("Invalid node name (%s)" %
                                   (self.op.name,))
      self.op.name = expanded
      self.target = self.cfg.GetNodeInfo(expanded)
    elif kind == constants.TAG_INSTANCE:
      expanded = self.cfg.ExpandInstanceName(self.op.name)
      if expanded is None:
        raise errors.OpPrereqError("Invalid instance name (%s)" %
                                   (self.op.name,))
      self.op.name = expanded
      self.target = self.cfg.GetInstanceInfo(expanded)
    else:
      raise errors.OpPrereqError("Wrong tag type requested (%s)" %
                                 str(self.op.kind))
4665 5c947f38 Iustin Pop
4666 5c947f38 Iustin Pop
4667 5c947f38 Iustin Pop
class LUGetTags(TagsLU):
  """Returns the tags of a given object.

  """
  _OP_REQP = ["kind", "name"]

  def Exec(self, feedback_fn):
    """Returns the tag list.

    """
    # self.target was resolved by TagsLU.CheckPrereq
    return [tag for tag in self.target.GetTags()]
4678 5c947f38 Iustin Pop
4679 5c947f38 Iustin Pop
4680 73415719 Iustin Pop
class LUSearchTags(NoHooksLU):
4681 73415719 Iustin Pop
  """Searches the tags for a given pattern.
4682 73415719 Iustin Pop

4683 73415719 Iustin Pop
  """
4684 73415719 Iustin Pop
  _OP_REQP = ["pattern"]
4685 73415719 Iustin Pop
4686 73415719 Iustin Pop
  def CheckPrereq(self):
4687 73415719 Iustin Pop
    """Check prerequisites.
4688 73415719 Iustin Pop

4689 73415719 Iustin Pop
    This checks the pattern passed for validity by compiling it.
4690 73415719 Iustin Pop

4691 73415719 Iustin Pop
    """
4692 73415719 Iustin Pop
    try:
4693 73415719 Iustin Pop
      self.re = re.compile(self.op.pattern)
4694 73415719 Iustin Pop
    except re.error, err:
4695 73415719 Iustin Pop
      raise errors.OpPrereqError("Invalid search pattern '%s': %s" %
4696 73415719 Iustin Pop
                                 (self.op.pattern, err))
4697 73415719 Iustin Pop
4698 73415719 Iustin Pop
  def Exec(self, feedback_fn):
4699 73415719 Iustin Pop
    """Returns the tag list.
4700 73415719 Iustin Pop

4701 73415719 Iustin Pop
    """
4702 73415719 Iustin Pop
    cfg = self.cfg
4703 73415719 Iustin Pop
    tgts = [("/cluster", cfg.GetClusterInfo())]
4704 73415719 Iustin Pop
    ilist = [cfg.GetInstanceInfo(name) for name in cfg.GetInstanceList()]
4705 73415719 Iustin Pop
    tgts.extend([("/instances/%s" % i.name, i) for i in ilist])
4706 73415719 Iustin Pop
    nlist = [cfg.GetNodeInfo(name) for name in cfg.GetNodeList()]
4707 73415719 Iustin Pop
    tgts.extend([("/nodes/%s" % n.name, n) for n in nlist])
4708 73415719 Iustin Pop
    results = []
4709 73415719 Iustin Pop
    for path, target in tgts:
4710 73415719 Iustin Pop
      for tag in target.GetTags():
4711 73415719 Iustin Pop
        if self.re.search(tag):
4712 73415719 Iustin Pop
          results.append((path, tag))
4713 73415719 Iustin Pop
    return results
4714 73415719 Iustin Pop
4715 73415719 Iustin Pop
4716 f27302fa Iustin Pop
class LUAddTags(TagsLU):
4717 5c947f38 Iustin Pop
  """Sets a tag on a given object.
4718 5c947f38 Iustin Pop

4719 5c947f38 Iustin Pop
  """
4720 f27302fa Iustin Pop
  _OP_REQP = ["kind", "name", "tags"]
4721 5c947f38 Iustin Pop
4722 5c947f38 Iustin Pop
  def CheckPrereq(self):
4723 5c947f38 Iustin Pop
    """Check prerequisites.
4724 5c947f38 Iustin Pop

4725 5c947f38 Iustin Pop
    This checks the type and length of the tag name and value.
4726 5c947f38 Iustin Pop

4727 5c947f38 Iustin Pop
    """
4728 5c947f38 Iustin Pop
    TagsLU.CheckPrereq(self)
4729 f27302fa Iustin Pop
    for tag in self.op.tags:
4730 f27302fa Iustin Pop
      objects.TaggableObject.ValidateTag(tag)
4731 5c947f38 Iustin Pop
4732 5c947f38 Iustin Pop
  def Exec(self, feedback_fn):
4733 5c947f38 Iustin Pop
    """Sets the tag.
4734 5c947f38 Iustin Pop

4735 5c947f38 Iustin Pop
    """
4736 5c947f38 Iustin Pop
    try:
4737 f27302fa Iustin Pop
      for tag in self.op.tags:
4738 f27302fa Iustin Pop
        self.target.AddTag(tag)
4739 5c947f38 Iustin Pop
    except errors.TagError, err:
4740 3ecf6786 Iustin Pop
      raise errors.OpExecError("Error while setting tag: %s" % str(err))
4741 5c947f38 Iustin Pop
    try:
4742 5c947f38 Iustin Pop
      self.cfg.Update(self.target)
4743 5c947f38 Iustin Pop
    except errors.ConfigurationError:
4744 3ecf6786 Iustin Pop
      raise errors.OpRetryError("There has been a modification to the"
4745 3ecf6786 Iustin Pop
                                " config file and the operation has been"
4746 3ecf6786 Iustin Pop
                                " aborted. Please retry.")
4747 5c947f38 Iustin Pop
4748 5c947f38 Iustin Pop
4749 f27302fa Iustin Pop
class LUDelTags(TagsLU):
  """Delete a list of tags from a given object.

  """
  _OP_REQP = ["kind", "name", "tags"]

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that we have the given tag.

    """
    TagsLU.CheckPrereq(self)
    for del_tag in self.op.tags:
      objects.TaggableObject.ValidateTag(del_tag)
    del_tags = frozenset(self.op.tags)
    cur_tags = self.target.GetTags()
    # every tag to be removed must currently exist on the object
    if not del_tags <= cur_tags:
      missing = sorted(["'%s'" % tag for tag in del_tags - cur_tags])
      raise errors.OpPrereqError("Tag(s) %s not found" %
                                 (",".join(missing)))

  def Exec(self, feedback_fn):
    """Remove the tag from the object.

    """
    target = self.target
    for del_tag in self.op.tags:
      target.RemoveTag(del_tag)
    try:
      self.cfg.Update(target)
    except errors.ConfigurationError:
      raise errors.OpRetryError("There has been a modification to the"
                                " config file and the operation has been"
                                " aborted. Please retry.")
4785 06009e27 Iustin Pop
4786 0eed6e61 Guido Trotter
4787 06009e27 Iustin Pop
class LUTestDelay(NoHooksLU):
  """Sleep for a specified amount of time.

  This LU sleeps on the master and/or nodes for a specified amount of
  time.

  """
  _OP_REQP = ["duration", "on_master", "on_nodes"]
  REQ_BGL = False

  def ExpandNames(self):
    """Expand names and set required locks.

    This expands the node list, if any.

    """
    self.needed_locks = {}
    if not self.op.on_nodes:
      return
    # _GetWantedNodes can be used here, but is not always appropriate to use
    # this way in ExpandNames. Check LogicalUnit.ExpandNames docstring for
    # more information.
    self.op.on_nodes = _GetWantedNodes(self, self.op.on_nodes)
    self.needed_locks[locking.LEVEL_NODE] = self.op.on_nodes

  def CheckPrereq(self):
    """Check prerequisites.

    """

  def Exec(self, feedback_fn):
    """Do the actual sleep.

    """
    if self.op.on_master and not utils.TestDelay(self.op.duration):
      raise errors.OpExecError("Error during master delay test")
    if self.op.on_nodes:
      node_results = rpc.call_test_delay(self.op.on_nodes, self.op.duration)
      if not node_results:
        raise errors.OpExecError("Complete failure from rpc call")
      for node_name, success in node_results.items():
        if not success:
          raise errors.OpExecError("Failure during rpc call to node %s,"
                                   " result: %s" % (node_name, success))
4831 d61df03e Iustin Pop
4832 d61df03e Iustin Pop
4833 d1c2dd75 Iustin Pop
class IAllocator(object):
4834 d1c2dd75 Iustin Pop
  """IAllocator framework.
4835 d61df03e Iustin Pop

4836 d1c2dd75 Iustin Pop
  An IAllocator instance has three sets of attributes:
4837 d1c2dd75 Iustin Pop
    - cfg/sstore that are needed to query the cluster
4838 d1c2dd75 Iustin Pop
    - input data (all members of the _KEYS class attribute are required)
4839 d1c2dd75 Iustin Pop
    - four buffer attributes (in|out_data|text), that represent the
4840 d1c2dd75 Iustin Pop
      input (to the external script) in text and data structure format,
4841 d1c2dd75 Iustin Pop
      and the output from it, again in two formats
4842 d1c2dd75 Iustin Pop
    - the result variables from the script (success, info, nodes) for
4843 d1c2dd75 Iustin Pop
      easy usage
4844 d61df03e Iustin Pop

4845 d61df03e Iustin Pop
  """
4846 29859cb7 Iustin Pop
  _ALLO_KEYS = [
4847 d1c2dd75 Iustin Pop
    "mem_size", "disks", "disk_template",
4848 d1c2dd75 Iustin Pop
    "os", "tags", "nics", "vcpus",
4849 d1c2dd75 Iustin Pop
    ]
4850 29859cb7 Iustin Pop
  _RELO_KEYS = [
4851 29859cb7 Iustin Pop
    "relocate_from",
4852 29859cb7 Iustin Pop
    ]
4853 d1c2dd75 Iustin Pop
4854 29859cb7 Iustin Pop
  def __init__(self, cfg, sstore, mode, name, **kwargs):
4855 d1c2dd75 Iustin Pop
    self.cfg = cfg
4856 d1c2dd75 Iustin Pop
    self.sstore = sstore
4857 d1c2dd75 Iustin Pop
    # init buffer variables
4858 d1c2dd75 Iustin Pop
    self.in_text = self.out_text = self.in_data = self.out_data = None
4859 d1c2dd75 Iustin Pop
    # init all input fields so that pylint is happy
4860 29859cb7 Iustin Pop
    self.mode = mode
4861 29859cb7 Iustin Pop
    self.name = name
4862 d1c2dd75 Iustin Pop
    self.mem_size = self.disks = self.disk_template = None
4863 d1c2dd75 Iustin Pop
    self.os = self.tags = self.nics = self.vcpus = None
4864 29859cb7 Iustin Pop
    self.relocate_from = None
4865 27579978 Iustin Pop
    # computed fields
4866 27579978 Iustin Pop
    self.required_nodes = None
4867 d1c2dd75 Iustin Pop
    # init result fields
4868 d1c2dd75 Iustin Pop
    self.success = self.info = self.nodes = None
4869 29859cb7 Iustin Pop
    if self.mode == constants.IALLOCATOR_MODE_ALLOC:
4870 29859cb7 Iustin Pop
      keyset = self._ALLO_KEYS
4871 29859cb7 Iustin Pop
    elif self.mode == constants.IALLOCATOR_MODE_RELOC:
4872 29859cb7 Iustin Pop
      keyset = self._RELO_KEYS
4873 29859cb7 Iustin Pop
    else:
4874 29859cb7 Iustin Pop
      raise errors.ProgrammerError("Unknown mode '%s' passed to the"
4875 29859cb7 Iustin Pop
                                   " IAllocator" % self.mode)
4876 d1c2dd75 Iustin Pop
    for key in kwargs:
4877 29859cb7 Iustin Pop
      if key not in keyset:
4878 d1c2dd75 Iustin Pop
        raise errors.ProgrammerError("Invalid input parameter '%s' to"
4879 d1c2dd75 Iustin Pop
                                     " IAllocator" % key)
4880 d1c2dd75 Iustin Pop
      setattr(self, key, kwargs[key])
4881 29859cb7 Iustin Pop
    for key in keyset:
4882 d1c2dd75 Iustin Pop
      if key not in kwargs:
4883 d1c2dd75 Iustin Pop
        raise errors.ProgrammerError("Missing input parameter '%s' to"
4884 d1c2dd75 Iustin Pop
                                     " IAllocator" % key)
4885 d1c2dd75 Iustin Pop
    self._BuildInputData()
4886 d1c2dd75 Iustin Pop
4887 d1c2dd75 Iustin Pop
  def _ComputeClusterData(self):
4888 d1c2dd75 Iustin Pop
    """Compute the generic allocator input data.
4889 d1c2dd75 Iustin Pop

4890 d1c2dd75 Iustin Pop
    This is the data that is independent of the actual operation.
4891 d1c2dd75 Iustin Pop

4892 d1c2dd75 Iustin Pop
    """
4893 d1c2dd75 Iustin Pop
    cfg = self.cfg
4894 d1c2dd75 Iustin Pop
    # cluster data
4895 d1c2dd75 Iustin Pop
    data = {
4896 d1c2dd75 Iustin Pop
      "version": 1,
4897 d1c2dd75 Iustin Pop
      "cluster_name": self.sstore.GetClusterName(),
4898 d1c2dd75 Iustin Pop
      "cluster_tags": list(cfg.GetClusterInfo().GetTags()),
4899 6286519f Iustin Pop
      "hypervisor_type": self.sstore.GetHypervisorType(),
4900 d1c2dd75 Iustin Pop
      # we don't have job IDs
4901 d61df03e Iustin Pop
      }
4902 d61df03e Iustin Pop
4903 6286519f Iustin Pop
    i_list = [cfg.GetInstanceInfo(iname) for iname in cfg.GetInstanceList()]
4904 6286519f Iustin Pop
4905 d1c2dd75 Iustin Pop
    # node data
4906 d1c2dd75 Iustin Pop
    node_results = {}
4907 d1c2dd75 Iustin Pop
    node_list = cfg.GetNodeList()
4908 d1c2dd75 Iustin Pop
    node_data = rpc.call_node_info(node_list, cfg.GetVGName())
4909 d1c2dd75 Iustin Pop
    for nname in node_list:
4910 d1c2dd75 Iustin Pop
      ninfo = cfg.GetNodeInfo(nname)
4911 d1c2dd75 Iustin Pop
      if nname not in node_data or not isinstance(node_data[nname], dict):
4912 d1c2dd75 Iustin Pop
        raise errors.OpExecError("Can't get data for node %s" % nname)
4913 d1c2dd75 Iustin Pop
      remote_info = node_data[nname]
4914 b2662e7f Iustin Pop
      for attr in ['memory_total', 'memory_free', 'memory_dom0',
4915 4337cf1b Iustin Pop
                   'vg_size', 'vg_free', 'cpu_total']:
4916 d1c2dd75 Iustin Pop
        if attr not in remote_info:
4917 d1c2dd75 Iustin Pop
          raise errors.OpExecError("Node '%s' didn't return attribute '%s'" %
4918 d1c2dd75 Iustin Pop
                                   (nname, attr))
4919 d1c2dd75 Iustin Pop
        try:
4920 b2662e7f Iustin Pop
          remote_info[attr] = int(remote_info[attr])
4921 d1c2dd75 Iustin Pop
        except ValueError, err:
4922 d1c2dd75 Iustin Pop
          raise errors.OpExecError("Node '%s' returned invalid value for '%s':"
4923 d1c2dd75 Iustin Pop
                                   " %s" % (nname, attr, str(err)))
4924 6286519f Iustin Pop
      # compute memory used by primary instances
4925 6286519f Iustin Pop
      i_p_mem = i_p_up_mem = 0
4926 6286519f Iustin Pop
      for iinfo in i_list:
4927 6286519f Iustin Pop
        if iinfo.primary_node == nname:
4928 6286519f Iustin Pop
          i_p_mem += iinfo.memory
4929 6286519f Iustin Pop
          if iinfo.status == "up":
4930 6286519f Iustin Pop
            i_p_up_mem += iinfo.memory
4931 6286519f Iustin Pop
4932 b2662e7f Iustin Pop
      # compute memory used by instances
4933 d1c2dd75 Iustin Pop
      pnr = {
4934 d1c2dd75 Iustin Pop
        "tags": list(ninfo.GetTags()),
4935 b2662e7f Iustin Pop
        "total_memory": remote_info['memory_total'],
4936 b2662e7f Iustin Pop
        "reserved_memory": remote_info['memory_dom0'],
4937 b2662e7f Iustin Pop
        "free_memory": remote_info['memory_free'],
4938 6286519f Iustin Pop
        "i_pri_memory": i_p_mem,
4939 6286519f Iustin Pop
        "i_pri_up_memory": i_p_up_mem,
4940 b2662e7f Iustin Pop
        "total_disk": remote_info['vg_size'],
4941 b2662e7f Iustin Pop
        "free_disk": remote_info['vg_free'],
4942 d1c2dd75 Iustin Pop
        "primary_ip": ninfo.primary_ip,
4943 d1c2dd75 Iustin Pop
        "secondary_ip": ninfo.secondary_ip,
4944 4337cf1b Iustin Pop
        "total_cpus": remote_info['cpu_total'],
4945 d1c2dd75 Iustin Pop
        }
4946 d1c2dd75 Iustin Pop
      node_results[nname] = pnr
4947 d1c2dd75 Iustin Pop
    data["nodes"] = node_results
4948 d1c2dd75 Iustin Pop
4949 d1c2dd75 Iustin Pop
    # instance data
4950 d1c2dd75 Iustin Pop
    instance_data = {}
4951 6286519f Iustin Pop
    for iinfo in i_list:
4952 d1c2dd75 Iustin Pop
      nic_data = [{"mac": n.mac, "ip": n.ip, "bridge": n.bridge}
4953 d1c2dd75 Iustin Pop
                  for n in iinfo.nics]
4954 d1c2dd75 Iustin Pop
      pir = {
4955 d1c2dd75 Iustin Pop
        "tags": list(iinfo.GetTags()),
4956 d1c2dd75 Iustin Pop
        "should_run": iinfo.status == "up",
4957 d1c2dd75 Iustin Pop
        "vcpus": iinfo.vcpus,
4958 d1c2dd75 Iustin Pop
        "memory": iinfo.memory,
4959 d1c2dd75 Iustin Pop
        "os": iinfo.os,
4960 d1c2dd75 Iustin Pop
        "nodes": [iinfo.primary_node] + list(iinfo.secondary_nodes),
4961 d1c2dd75 Iustin Pop
        "nics": nic_data,
4962 d1c2dd75 Iustin Pop
        "disks": [{"size": dsk.size, "mode": "w"} for dsk in iinfo.disks],
4963 d1c2dd75 Iustin Pop
        "disk_template": iinfo.disk_template,
4964 d1c2dd75 Iustin Pop
        }
4965 768f0a80 Iustin Pop
      instance_data[iinfo.name] = pir
4966 d61df03e Iustin Pop
4967 d1c2dd75 Iustin Pop
    data["instances"] = instance_data
4968 d61df03e Iustin Pop
4969 d1c2dd75 Iustin Pop
    self.in_data = data
4970 d61df03e Iustin Pop
4971 d1c2dd75 Iustin Pop
  def _AddNewInstance(self):
4972 d1c2dd75 Iustin Pop
    """Add new instance data to allocator structure.
4973 d61df03e Iustin Pop

4974 d1c2dd75 Iustin Pop
    This in combination with _AllocatorGetClusterData will create the
4975 d1c2dd75 Iustin Pop
    correct structure needed as input for the allocator.
4976 d61df03e Iustin Pop

4977 d1c2dd75 Iustin Pop
    The checks for the completeness of the opcode must have already been
4978 d1c2dd75 Iustin Pop
    done.
4979 d61df03e Iustin Pop

4980 d1c2dd75 Iustin Pop
    """
4981 d1c2dd75 Iustin Pop
    data = self.in_data
4982 d1c2dd75 Iustin Pop
    if len(self.disks) != 2:
4983 d1c2dd75 Iustin Pop
      raise errors.OpExecError("Only two-disk configurations supported")
4984 d1c2dd75 Iustin Pop
4985 d1c2dd75 Iustin Pop
    disk_space = _ComputeDiskSize(self.disk_template,
4986 d1c2dd75 Iustin Pop
                                  self.disks[0]["size"], self.disks[1]["size"])
4987 d1c2dd75 Iustin Pop
4988 27579978 Iustin Pop
    if self.disk_template in constants.DTS_NET_MIRROR:
4989 27579978 Iustin Pop
      self.required_nodes = 2
4990 27579978 Iustin Pop
    else:
4991 27579978 Iustin Pop
      self.required_nodes = 1
4992 d1c2dd75 Iustin Pop
    request = {
4993 d1c2dd75 Iustin Pop
      "type": "allocate",
4994 d1c2dd75 Iustin Pop
      "name": self.name,
4995 d1c2dd75 Iustin Pop
      "disk_template": self.disk_template,
4996 d1c2dd75 Iustin Pop
      "tags": self.tags,
4997 d1c2dd75 Iustin Pop
      "os": self.os,
4998 d1c2dd75 Iustin Pop
      "vcpus": self.vcpus,
4999 d1c2dd75 Iustin Pop
      "memory": self.mem_size,
5000 d1c2dd75 Iustin Pop
      "disks": self.disks,
5001 d1c2dd75 Iustin Pop
      "disk_space_total": disk_space,
5002 d1c2dd75 Iustin Pop
      "nics": self.nics,
5003 27579978 Iustin Pop
      "required_nodes": self.required_nodes,
5004 d1c2dd75 Iustin Pop
      }
5005 d1c2dd75 Iustin Pop
    data["request"] = request
5006 298fe380 Iustin Pop
5007 d1c2dd75 Iustin Pop
  def _AddRelocateInstance(self):
5008 d1c2dd75 Iustin Pop
    """Add relocate instance data to allocator structure.
5009 298fe380 Iustin Pop

5010 d1c2dd75 Iustin Pop
    This in combination with _IAllocatorGetClusterData will create the
5011 d1c2dd75 Iustin Pop
    correct structure needed as input for the allocator.
5012 d61df03e Iustin Pop

5013 d1c2dd75 Iustin Pop
    The checks for the completeness of the opcode must have already been
5014 d1c2dd75 Iustin Pop
    done.
5015 d61df03e Iustin Pop

5016 d1c2dd75 Iustin Pop
    """
5017 27579978 Iustin Pop
    instance = self.cfg.GetInstanceInfo(self.name)
5018 27579978 Iustin Pop
    if instance is None:
5019 27579978 Iustin Pop
      raise errors.ProgrammerError("Unknown instance '%s' passed to"
5020 27579978 Iustin Pop
                                   " IAllocator" % self.name)
5021 27579978 Iustin Pop
5022 27579978 Iustin Pop
    if instance.disk_template not in constants.DTS_NET_MIRROR:
5023 27579978 Iustin Pop
      raise errors.OpPrereqError("Can't relocate non-mirrored instances")
5024 27579978 Iustin Pop
5025 2a139bb0 Iustin Pop
    if len(instance.secondary_nodes) != 1:
5026 2a139bb0 Iustin Pop
      raise errors.OpPrereqError("Instance has not exactly one secondary node")
5027 2a139bb0 Iustin Pop
5028 27579978 Iustin Pop
    self.required_nodes = 1
5029 27579978 Iustin Pop
5030 27579978 Iustin Pop
    disk_space = _ComputeDiskSize(instance.disk_template,
5031 27579978 Iustin Pop
                                  instance.disks[0].size,
5032 27579978 Iustin Pop
                                  instance.disks[1].size)
5033 27579978 Iustin Pop
5034 d1c2dd75 Iustin Pop
    request = {
5035 2a139bb0 Iustin Pop
      "type": "relocate",
5036 d1c2dd75 Iustin Pop
      "name": self.name,
5037 27579978 Iustin Pop
      "disk_space_total": disk_space,
5038 27579978 Iustin Pop
      "required_nodes": self.required_nodes,
5039 29859cb7 Iustin Pop
      "relocate_from": self.relocate_from,
5040 d1c2dd75 Iustin Pop
      }
5041 27579978 Iustin Pop
    self.in_data["request"] = request
5042 d61df03e Iustin Pop
5043 d1c2dd75 Iustin Pop
  def _BuildInputData(self):
5044 d1c2dd75 Iustin Pop
    """Build input data structures.
5045 d61df03e Iustin Pop

5046 d1c2dd75 Iustin Pop
    """
5047 d1c2dd75 Iustin Pop
    self._ComputeClusterData()
5048 d61df03e Iustin Pop
5049 d1c2dd75 Iustin Pop
    if self.mode == constants.IALLOCATOR_MODE_ALLOC:
5050 d1c2dd75 Iustin Pop
      self._AddNewInstance()
5051 d1c2dd75 Iustin Pop
    else:
5052 d1c2dd75 Iustin Pop
      self._AddRelocateInstance()
5053 d61df03e Iustin Pop
5054 d1c2dd75 Iustin Pop
    self.in_text = serializer.Dump(self.in_data)
5055 d61df03e Iustin Pop
5056 8d528b7c Iustin Pop
  def Run(self, name, validate=True, call_fn=rpc.call_iallocator_runner):
5057 d1c2dd75 Iustin Pop
    """Run an instance allocator and return the results.
5058 298fe380 Iustin Pop

5059 d1c2dd75 Iustin Pop
    """
5060 d1c2dd75 Iustin Pop
    data = self.in_text
5061 298fe380 Iustin Pop
5062 8d528b7c Iustin Pop
    result = call_fn(self.sstore.GetMasterNode(), name, self.in_text)
5063 298fe380 Iustin Pop
5064 8d528b7c Iustin Pop
    if not isinstance(result, tuple) or len(result) != 4:
5065 8d528b7c Iustin Pop
      raise errors.OpExecError("Invalid result from master iallocator runner")
5066 8d528b7c Iustin Pop
5067 8d528b7c Iustin Pop
    rcode, stdout, stderr, fail = result
5068 8d528b7c Iustin Pop
5069 8d528b7c Iustin Pop
    if rcode == constants.IARUN_NOTFOUND:
5070 8d528b7c Iustin Pop
      raise errors.OpExecError("Can't find allocator '%s'" % name)
5071 8d528b7c Iustin Pop
    elif rcode == constants.IARUN_FAILURE:
5072 38206f3c Iustin Pop
      raise errors.OpExecError("Instance allocator call failed: %s,"
5073 38206f3c Iustin Pop
                               " output: %s" % (fail, stdout+stderr))
5074 8d528b7c Iustin Pop
    self.out_text = stdout
5075 d1c2dd75 Iustin Pop
    if validate:
5076 d1c2dd75 Iustin Pop
      self._ValidateResult()
5077 298fe380 Iustin Pop
5078 d1c2dd75 Iustin Pop
  def _ValidateResult(self):
5079 d1c2dd75 Iustin Pop
    """Process the allocator results.
5080 538475ca Iustin Pop

5081 d1c2dd75 Iustin Pop
    This will process and if successful save the result in
5082 d1c2dd75 Iustin Pop
    self.out_data and the other parameters.
5083 538475ca Iustin Pop

5084 d1c2dd75 Iustin Pop
    """
5085 d1c2dd75 Iustin Pop
    try:
5086 d1c2dd75 Iustin Pop
      rdict = serializer.Load(self.out_text)
5087 d1c2dd75 Iustin Pop
    except Exception, err:
5088 d1c2dd75 Iustin Pop
      raise errors.OpExecError("Can't parse iallocator results: %s" % str(err))
5089 d1c2dd75 Iustin Pop
5090 d1c2dd75 Iustin Pop
    if not isinstance(rdict, dict):
5091 d1c2dd75 Iustin Pop
      raise errors.OpExecError("Can't parse iallocator results: not a dict")
5092 538475ca Iustin Pop
5093 d1c2dd75 Iustin Pop
    for key in "success", "info", "nodes":
5094 d1c2dd75 Iustin Pop
      if key not in rdict:
5095 d1c2dd75 Iustin Pop
        raise errors.OpExecError("Can't parse iallocator results:"
5096 d1c2dd75 Iustin Pop
                                 " missing key '%s'" % key)
5097 d1c2dd75 Iustin Pop
      setattr(self, key, rdict[key])
5098 538475ca Iustin Pop
5099 d1c2dd75 Iustin Pop
    if not isinstance(rdict["nodes"], list):
5100 d1c2dd75 Iustin Pop
      raise errors.OpExecError("Can't parse iallocator results: 'nodes' key"
5101 d1c2dd75 Iustin Pop
                               " is not a list")
5102 d1c2dd75 Iustin Pop
    self.out_data = rdict
5103 538475ca Iustin Pop
5104 538475ca Iustin Pop
5105 d61df03e Iustin Pop
class LUTestAllocator(NoHooksLU):
  """Run allocator tests.

  This LU exercises the instance-allocator framework: it builds an
  allocation or relocation request from the opcode parameters and either
  returns the request text (direction "in") or runs the named allocator
  and returns its raw output (direction "out").

  """
  _OP_REQP = ["direction", "mode", "name"]

  def CheckPrereq(self):
    """Check prerequisites.

    Validates the opcode parameters; which ones are required depends on
    the requested mode (allocation vs. relocation) and direction.

    """
    op = self.op
    if op.mode == constants.IALLOCATOR_MODE_ALLOC:
      # A full allocation request needs the complete instance description.
      required = ("name", "mem_size", "disks", "disk_template",
                  "os", "tags", "nics", "vcpus")
      for attr in required:
        if not hasattr(op, attr):
          raise errors.OpPrereqError("Missing attribute '%s' on opcode input" %
                                     attr)
      # The test instance must not clash with an existing one.
      existing = self.cfg.ExpandInstanceName(op.name)
      if existing is not None:
        raise errors.OpPrereqError("Instance '%s' already in the cluster" %
                                   existing)
      if not isinstance(op.nics, list):
        raise errors.OpPrereqError("Invalid parameter 'nics'")
      for nic in op.nics:
        # Each NIC entry is a dict with mac/ip/bridge keys.
        nic_ok = (isinstance(nic, dict) and
                  "mac" in nic and
                  "ip" in nic and
                  "bridge" in nic)
        if not nic_ok:
          raise errors.OpPrereqError("Invalid contents of the"
                                     " 'nics' parameter")
      if not isinstance(op.disks, list):
        raise errors.OpPrereqError("Invalid parameter 'disks'")
      if len(op.disks) != 2:
        raise errors.OpPrereqError("Only two-disk configurations supported")
      for disk in op.disks:
        # Each disk entry is a dict with an integer size and an r/w mode.
        disk_ok = (isinstance(disk, dict) and
                   "size" in disk and
                   isinstance(disk["size"], int) and
                   "mode" in disk and
                   disk["mode"] in ['r', 'w'])
        if not disk_ok:
          raise errors.OpPrereqError("Invalid contents of the"
                                     " 'disks' parameter")
    elif op.mode == constants.IALLOCATOR_MODE_RELOC:
      # Relocation only needs an existing instance name; canonicalize it
      # and remember its current secondary nodes as relocation sources.
      if not hasattr(op, "name"):
        raise errors.OpPrereqError("Missing attribute 'name' on opcode input")
      full_name = self.cfg.ExpandInstanceName(op.name)
      if full_name is None:
        raise errors.OpPrereqError("Instance '%s' not found for relocation" %
                                   op.name)
      op.name = full_name
      self.relocate_from = self.cfg.GetInstanceInfo(full_name).secondary_nodes
    else:
      raise errors.OpPrereqError("Invalid test allocator mode '%s'" %
                                 op.mode)

    # Direction "out" actually invokes an allocator, so its name is needed.
    if op.direction == constants.IALLOCATOR_DIR_OUT:
      if getattr(op, "allocator", None) is None:
        raise errors.OpPrereqError("Missing allocator name")
    elif op.direction != constants.IALLOCATOR_DIR_IN:
      raise errors.OpPrereqError("Wrong allocator test '%s'" %
                                 op.direction)

  def Exec(self, feedback_fn):
    """Build the allocator request and run the requested test.

    Returns either the generated request text (direction "in") or the
    allocator's raw output (direction "out").

    """
    # Assemble the IAllocator keyword arguments for the chosen mode.
    kwargs = {
      "mode": self.op.mode,
      "name": self.op.name,
      }
    if self.op.mode == constants.IALLOCATOR_MODE_ALLOC:
      kwargs.update(mem_size=self.op.mem_size,
                    disks=self.op.disks,
                    disk_template=self.op.disk_template,
                    os=self.op.os,
                    tags=self.op.tags,
                    nics=self.op.nics,
                    vcpus=self.op.vcpus)
    else:
      kwargs["relocate_from"] = list(self.relocate_from)
    ial = IAllocator(self.cfg, self.sstore, **kwargs)

    if self.op.direction == constants.IALLOCATOR_DIR_IN:
      return ial.in_text
    # Direction "out": run the external allocator without validating, so
    # the raw output can be inspected by the tester.
    ial.Run(self.op.allocator, validate=False)
    return ial.out_text