#
#

# Copyright (C) 2006, 2007, 2008 Google Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA.


"""Module implementing the master-side code."""

# pylint: disable-msg=W0613,W0201

import os
import os.path
import sha
import time
import tempfile
import re
import platform

from ganeti import rpc
from ganeti import ssh
from ganeti import logger
from ganeti import utils
from ganeti import errors
from ganeti import hypervisor
from ganeti import config
from ganeti import constants
from ganeti import objects
from ganeti import opcodes
from ganeti import ssconf
from ganeti import serializer


class LogicalUnit(object):
  """Logical Unit base class.

  Subclasses must follow these rules:
    - implement CheckPrereq which also fills in the opcode instance
      with all the fields (even if as None)
    - implement Exec
    - implement BuildHooksEnv
    - redefine HPATH and HTYPE
    - optionally redefine their run requirements:
        REQ_MASTER: the LU needs to run on the master node
        REQ_WSSTORE: the LU needs a writable SimpleStore

  Note that all commands require root permissions.

  """
  HPATH = None
  HTYPE = None
  _OP_REQP = []
  REQ_MASTER = True
  REQ_WSSTORE = False

  def __init__(self, processor, op, cfg, sstore):
    """Constructor for LogicalUnit.

    This needs to be overridden in derived classes in order to check op
    validity.

    """
    self.proc = processor
    self.op = op
    self.cfg = cfg
    self.sstore = sstore
    self.__ssh = None

    for attr_name in self._OP_REQP:
      attr_val = getattr(op, attr_name, None)
      if attr_val is None:
        raise errors.OpPrereqError("Required parameter '%s' missing" %
                                   attr_name)

    if not cfg.IsCluster():
      raise errors.OpPrereqError("Cluster not initialized yet,"
                                 " use 'gnt-cluster init' first.")
    if self.REQ_MASTER:
      master = sstore.GetMasterNode()
      if master != utils.HostInfo().name:
        raise errors.OpPrereqError("Commands must be run on the master"
                                   " node %s" % master)

  def __GetSSH(self):
    """Returns the SshRunner object.

    """
    if not self.__ssh:
      self.__ssh = ssh.SshRunner(self.sstore)
    return self.__ssh

  ssh = property(fget=__GetSSH)

  def CheckPrereq(self):
    """Check prerequisites for this LU.

    This method should check that the prerequisites for the execution
    of this LU are fulfilled. It can do internode communication, but
    it should be idempotent - no cluster or system changes are
    allowed.

    The method should raise errors.OpPrereqError in case something is
    not fulfilled. Its return value is ignored.

    This method should also update all the parameters of the opcode to
    their canonical form; e.g. a short node name must be fully
    expanded after this method has successfully completed (so that
    hooks, logging, etc. work correctly).

    """
    raise NotImplementedError

  def Exec(self, feedback_fn):
    """Execute the LU.

    This method should implement the actual work. It should raise
    errors.OpExecError for failures that are somewhat dealt with in
    code, or expected.

    """
    raise NotImplementedError

  def BuildHooksEnv(self):
    """Build hooks environment for this LU.

    This method should return a three-element tuple consisting of: a dict
    containing the environment that will be used for running the
    specific hook for this LU, a list of node names on which the hook
    should run before the execution, and a list of node names on which
    the hook should run after the execution.

    The keys of the dict must not be prefixed with 'GANETI_', as this
    will be handled by the hooks runner. Also note that additional keys
    will be added by the hooks runner. If the LU doesn't define any
    environment, an empty dict (and not None) should be returned.

    If there are no nodes to return, an empty list (and not None)
    should be used.

    Note that if the HPATH for a LU class is None, this function will
    not be called.

    """
    raise NotImplementedError

  def HooksCallBack(self, phase, hook_results, feedback_fn, lu_result):
    """Notify the LU about the results of its hooks.

    This method is called every time a hooks phase is executed, and notifies
    the Logical Unit about the hooks' result. The LU can then use it to alter
    its result based on the hooks. By default the method does nothing and the
    previous result is passed back unchanged, but any LU can override it if it
    wants to use the local cluster hook-scripts somehow.

    Args:
      phase: the hooks phase that has just been run
      hook_results: the results of the multi-node hooks rpc call
      feedback_fn: function to send feedback back to the caller
      lu_result: the previous result this LU had, or None in the PRE phase.

    """
    return lu_result


class NoHooksLU(LogicalUnit):
  """Simple LU which runs no hooks.

  This LU is intended as a parent for other LogicalUnits which will
  run no hooks, in order to reduce duplicate code.

  """
  HPATH = None
  HTYPE = None
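

# Illustrative example only (not used anywhere in this module): a minimal
# LU following the contract described in LogicalUnit's docstring above.
# The 'message' opcode field and the feedback text are hypothetical; the
# real LUs below (e.g. LUVerifyCluster) show the actual patterns in use.
class _ExampleEchoLU(NoHooksLU):
  """Example-only LU that validates one opcode field and echoes it back.

  """
  _OP_REQP = ["message"]

  def CheckPrereq(self):
    """Canonicalize the (hypothetical) 'message' opcode parameter.

    """
    if not isinstance(self.op.message, basestring):
      raise errors.OpPrereqError("Parameter 'message' must be a string")
    self.op.message = self.op.message.strip()

  def Exec(self, feedback_fn):
    """Report the message back to the caller and return it.

    """
    feedback_fn("echo: %s" % self.op.message)
    return self.op.message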


def _GetWantedNodes(lu, nodes):
  """Returns list of checked and expanded node names.

  Args:
    nodes: List of nodes (strings) or None for all

  """
  if not isinstance(nodes, list):
    raise errors.OpPrereqError("Invalid argument type 'nodes'")

  if nodes:
    wanted = []

    for name in nodes:
      node = lu.cfg.ExpandNodeName(name)
      if node is None:
        raise errors.OpPrereqError("No such node name '%s'" % name)
      wanted.append(node)

  else:
    wanted = lu.cfg.GetNodeList()
  return utils.NiceSort(wanted)


def _GetWantedInstances(lu, instances):
  """Returns list of checked and expanded instance names.

  Args:
    instances: List of instances (strings) or None for all

  """
  if not isinstance(instances, list):
    raise errors.OpPrereqError("Invalid argument type 'instances'")

  if instances:
    wanted = []

    for name in instances:
      instance = lu.cfg.ExpandInstanceName(name)
      if instance is None:
        raise errors.OpPrereqError("No such instance name '%s'" % name)
      wanted.append(instance)

  else:
    wanted = lu.cfg.GetInstanceList()
  return utils.NiceSort(wanted)


def _CheckOutputFields(static, dynamic, selected):
  """Checks whether all selected fields are valid.

  Args:
    static: Static fields
    dynamic: Dynamic fields
    selected: Fields selected by the caller, to be validated

  """
  static_fields = frozenset(static)
  dynamic_fields = frozenset(dynamic)

  all_fields = static_fields | dynamic_fields

  if not all_fields.issuperset(selected):
    raise errors.OpPrereqError("Unknown output fields selected: %s"
                               % ",".join(frozenset(selected).
                                          difference(all_fields)))
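
# Illustrative usage (hypothetical field names): a query LU would call
#   _CheckOutputFields(static=["name", "pinst_cnt"], dynamic=["mfree"],
#                      selected=self.op.output_fields)
# and any selected field outside static | dynamic raises OpPrereqError.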


def _BuildInstanceHookEnv(name, primary_node, secondary_nodes, os_type, status,
                          memory, vcpus, nics):
  """Builds instance related env variables for hooks from single variables.

  Args:
    secondary_nodes: List of secondary nodes as strings
  """
  env = {
    "OP_TARGET": name,
    "INSTANCE_NAME": name,
    "INSTANCE_PRIMARY": primary_node,
    "INSTANCE_SECONDARIES": " ".join(secondary_nodes),
    "INSTANCE_OS_TYPE": os_type,
    "INSTANCE_STATUS": status,
    "INSTANCE_MEMORY": memory,
    "INSTANCE_VCPUS": vcpus,
  }

  if nics:
    nic_count = len(nics)
    for idx, (ip, bridge, mac) in enumerate(nics):
      if ip is None:
        ip = ""
      env["INSTANCE_NIC%d_IP" % idx] = ip
      env["INSTANCE_NIC%d_BRIDGE" % idx] = bridge
      env["INSTANCE_NIC%d_HWADDR" % idx] = mac
  else:
    nic_count = 0

  env["INSTANCE_NIC_COUNT"] = nic_count

  return env
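
# For illustration only: with the hypothetical arguments
#   name="web1", primary_node="node1", secondary_nodes=["node2"],
#   os_type="debian", status="up", memory=512, vcpus=1,
#   nics=[("192.0.2.10", "xen-br0", "aa:00:00:00:00:01")]
# the function above yields, among others, INSTANCE_NAME=web1,
# INSTANCE_SECONDARIES="node2", INSTANCE_NIC_COUNT=1 and
# INSTANCE_NIC0_BRIDGE=xen-br0; per the BuildHooksEnv docstring, the
# hooks runner is responsible for adding the GANETI_ prefix to the keys.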


def _BuildInstanceHookEnvByObject(instance, override=None):
  """Builds instance related env variables for hooks from an object.

  Args:
    instance: objects.Instance object of instance
    override: dict of values to override
  """
  args = {
    'name': instance.name,
    'primary_node': instance.primary_node,
    'secondary_nodes': instance.secondary_nodes,
    'os_type': instance.os,
    'status': instance.status,
    'memory': instance.memory,
    'vcpus': instance.vcpus,
    'nics': [(nic.ip, nic.bridge, nic.mac) for nic in instance.nics],
  }
  if override:
    args.update(override)
  return _BuildInstanceHookEnv(**args)


def _CheckInstanceBridgesExist(instance):
  """Check that the bridges needed by an instance exist.

  """
  # check bridges existence
  brlist = [nic.bridge for nic in instance.nics]
  if not rpc.call_bridges_exist(instance.primary_node, brlist):
    raise errors.OpPrereqError("one or more target bridges %s do not"
                               " exist on destination node '%s'" %
                               (brlist, instance.primary_node))


class LUDestroyCluster(NoHooksLU):
  """Logical unit for destroying the cluster.

  """
  _OP_REQP = []

  def CheckPrereq(self):
    """Check prerequisites.

    This checks whether the cluster is empty.

    Any errors are signalled by raising errors.OpPrereqError.

    """
    master = self.sstore.GetMasterNode()

    nodelist = self.cfg.GetNodeList()
    if len(nodelist) != 1 or nodelist[0] != master:
      raise errors.OpPrereqError("There are still %d node(s) in"
                                 " this cluster." % (len(nodelist) - 1))
    instancelist = self.cfg.GetInstanceList()
    if instancelist:
      raise errors.OpPrereqError("There are still %d instance(s) in"
                                 " this cluster." % len(instancelist))

  def Exec(self, feedback_fn):
    """Destroys the cluster.

    """
    master = self.sstore.GetMasterNode()
    if not rpc.call_node_stop_master(master):
      raise errors.OpExecError("Could not disable the master role")
    priv_key, pub_key, _ = ssh.GetUserFiles(constants.GANETI_RUNAS)
    utils.CreateBackup(priv_key)
    utils.CreateBackup(pub_key)
    rpc.call_node_leave_cluster(master)


class LUVerifyCluster(LogicalUnit):
  """Verifies the cluster status.

  """
  HPATH = "cluster-verify"
  HTYPE = constants.HTYPE_CLUSTER
  _OP_REQP = ["skip_checks"]

  def _VerifyNode(self, node, file_list, local_cksum, vglist, node_result,
                  remote_version, feedback_fn):
    """Run multiple tests against a node.

    Test list:
      - compares ganeti version
      - checks vg existence and size > 20G
      - checks config file checksum
      - checks ssh to other nodes

    Args:
      node: name of the node to check
      file_list: required list of files
      local_cksum: dictionary of local files and their checksums

    """
    # compares ganeti version
    local_version = constants.PROTOCOL_VERSION
    if not remote_version:
      feedback_fn("  - ERROR: connection to %s failed" % (node))
      return True

    if local_version != remote_version:
      feedback_fn("  - ERROR: sw version mismatch: master %s, node(%s) %s" %
                      (local_version, node, remote_version))
      return True

    # checks vg existence and size > 20G

    bad = False
    if not vglist:
      feedback_fn("  - ERROR: unable to check volume groups on node %s." %
                      (node,))
      bad = True
    else:
      vgstatus = utils.CheckVolumeGroupSize(vglist, self.cfg.GetVGName(),
                                            constants.MIN_VG_SIZE)
      if vgstatus:
        feedback_fn("  - ERROR: %s on node %s" % (vgstatus, node))
        bad = True

    # checks config file checksum
    # checks ssh to any

    if 'filelist' not in node_result:
      bad = True
      feedback_fn("  - ERROR: node hasn't returned file checksum data")
    else:
      remote_cksum = node_result['filelist']
      for file_name in file_list:
        if file_name not in remote_cksum:
          bad = True
          feedback_fn("  - ERROR: file '%s' missing" % file_name)
        elif remote_cksum[file_name] != local_cksum[file_name]:
          bad = True
          feedback_fn("  - ERROR: file '%s' has wrong checksum" % file_name)

    if 'nodelist' not in node_result:
      bad = True
      feedback_fn("  - ERROR: node hasn't returned node ssh connectivity data")
    else:
      if node_result['nodelist']:
        bad = True
        for node in node_result['nodelist']:
          feedback_fn("  - ERROR: ssh communication with node '%s': %s" %
                          (node, node_result['nodelist'][node]))
    if 'node-net-test' not in node_result:
      bad = True
      feedback_fn("  - ERROR: node hasn't returned node tcp connectivity data")
    else:
      if node_result['node-net-test']:
        bad = True
        nlist = utils.NiceSort(node_result['node-net-test'].keys())
        for node in nlist:
          feedback_fn("  - ERROR: tcp communication with node '%s': %s" %
                          (node, node_result['node-net-test'][node]))

    hyp_result = node_result.get('hypervisor', None)
    if hyp_result is not None:
      feedback_fn("  - ERROR: hypervisor verify failure: '%s'" % hyp_result)
    return bad
452 c5705f58 Guido Trotter
  def _VerifyInstance(self, instance, instanceconfig, node_vol_is,
453 c5705f58 Guido Trotter
                      node_instance, feedback_fn):
454 a8083063 Iustin Pop
    """Verify an instance.
455 a8083063 Iustin Pop

456 a8083063 Iustin Pop
    This function checks to see if the required block devices are
457 a8083063 Iustin Pop
    available on the instance's node.
458 a8083063 Iustin Pop

459 a8083063 Iustin Pop
    """
460 a8083063 Iustin Pop
    bad = False
461 a8083063 Iustin Pop
462 a8083063 Iustin Pop
    node_current = instanceconfig.primary_node
463 a8083063 Iustin Pop
464 a8083063 Iustin Pop
    node_vol_should = {}
465 a8083063 Iustin Pop
    instanceconfig.MapLVsByNode(node_vol_should)
466 a8083063 Iustin Pop
467 a8083063 Iustin Pop
    for node in node_vol_should:
468 a8083063 Iustin Pop
      for volume in node_vol_should[node]:
469 a8083063 Iustin Pop
        if node not in node_vol_is or volume not in node_vol_is[node]:
470 a8083063 Iustin Pop
          feedback_fn("  - ERROR: volume %s missing on node %s" %
471 a8083063 Iustin Pop
                          (volume, node))
472 a8083063 Iustin Pop
          bad = True
473 a8083063 Iustin Pop
474 a8083063 Iustin Pop
    if not instanceconfig.status == 'down':
475 a872dae6 Guido Trotter
      if (node_current not in node_instance or
476 a872dae6 Guido Trotter
          not instance in node_instance[node_current]):
477 a8083063 Iustin Pop
        feedback_fn("  - ERROR: instance %s not running on node %s" %
478 a8083063 Iustin Pop
                        (instance, node_current))
479 a8083063 Iustin Pop
        bad = True
480 a8083063 Iustin Pop
481 a8083063 Iustin Pop
    for node in node_instance:
482 a8083063 Iustin Pop
      if (not node == node_current):
483 a8083063 Iustin Pop
        if instance in node_instance[node]:
484 a8083063 Iustin Pop
          feedback_fn("  - ERROR: instance %s should not run on node %s" %
485 a8083063 Iustin Pop
                          (instance, node))
486 a8083063 Iustin Pop
          bad = True
487 a8083063 Iustin Pop
488 6a438c98 Michael Hanselmann
    return bad

  def _VerifyOrphanVolumes(self, node_vol_should, node_vol_is, feedback_fn):
    """Verify if there are any unknown volumes in the cluster.

    The .os, .swap and backup volumes are ignored. All other volumes are
    reported as unknown.

    """
    bad = False

    for node in node_vol_is:
      for volume in node_vol_is[node]:
        if node not in node_vol_should or volume not in node_vol_should[node]:
          feedback_fn("  - ERROR: volume %s on node %s should not exist" %
                      (volume, node))
          bad = True
    return bad

  def _VerifyOrphanInstances(self, instancelist, node_instance, feedback_fn):
    """Verify the list of running instances.

    This checks what instances are running but unknown to the cluster.

    """
    bad = False
    for node in node_instance:
      for runninginstance in node_instance[node]:
        if runninginstance not in instancelist:
          feedback_fn("  - ERROR: instance %s on node %s should not exist" %
                          (runninginstance, node))
          bad = True
    return bad
522 2b3b6ddd Guido Trotter
  def _VerifyNPlusOneMemory(self, node_info, instance_cfg, feedback_fn):
523 2b3b6ddd Guido Trotter
    """Verify N+1 Memory Resilience.
524 2b3b6ddd Guido Trotter

525 2b3b6ddd Guido Trotter
    Check that if one single node dies we can still start all the instances it
526 2b3b6ddd Guido Trotter
    was primary for.
527 2b3b6ddd Guido Trotter

528 2b3b6ddd Guido Trotter
    """
529 2b3b6ddd Guido Trotter
    bad = False
530 2b3b6ddd Guido Trotter
531 2b3b6ddd Guido Trotter
    for node, nodeinfo in node_info.iteritems():
532 2b3b6ddd Guido Trotter
      # This code checks that every node which is now listed as secondary has
533 2b3b6ddd Guido Trotter
      # enough memory to host all instances it is supposed to should a single
534 2b3b6ddd Guido Trotter
      # other node in the cluster fail.
535 2b3b6ddd Guido Trotter
      # FIXME: not ready for failover to an arbitrary node
536 2b3b6ddd Guido Trotter
      # FIXME: does not support file-backed instances
537 2b3b6ddd Guido Trotter
      # WARNING: we currently take into account down instances as well as up
538 2b3b6ddd Guido Trotter
      # ones, considering that even if they're down someone might want to start
539 2b3b6ddd Guido Trotter
      # them even in the event of a node failure.
540 2b3b6ddd Guido Trotter
      for prinode, instances in nodeinfo['sinst-by-pnode'].iteritems():
541 2b3b6ddd Guido Trotter
        needed_mem = 0
542 2b3b6ddd Guido Trotter
        for instance in instances:
543 2b3b6ddd Guido Trotter
          needed_mem += instance_cfg[instance].memory
544 2b3b6ddd Guido Trotter
        if nodeinfo['mfree'] < needed_mem:
545 2b3b6ddd Guido Trotter
          feedback_fn("  - ERROR: not enough memory on node %s to accomodate"
546 2b3b6ddd Guido Trotter
                      " failovers should node %s fail" % (node, prinode))
547 2b3b6ddd Guido Trotter
          bad = True
548 2b3b6ddd Guido Trotter
    return bad
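
  # Worked example (hypothetical numbers): if node B is secondary for
  # instances i1 (512 MB) and i2 (1024 MB) whose primary is node A, then
  # nodeinfo['sinst-by-pnode']['A'] on B is [i1, i2] and needed_mem is
  # 1536 MB; the check above flags B unless its 'mfree' is at least that,
  # since a failure of A would fail both instances over to B at once.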

  def CheckPrereq(self):
    """Check prerequisites.

    Transform the list of checks we're going to skip into a set and check that
    all its members are valid.

    """
    self.skip_set = frozenset(self.op.skip_checks)
    if not constants.VERIFY_OPTIONAL_CHECKS.issuperset(self.skip_set):
      raise errors.OpPrereqError("Invalid checks to be skipped specified")
561 d8fff41c Guido Trotter
  def BuildHooksEnv(self):
562 d8fff41c Guido Trotter
    """Build hooks env.
563 d8fff41c Guido Trotter

564 d8fff41c Guido Trotter
    Cluster-Verify hooks just rone in the post phase and their failure makes
565 d8fff41c Guido Trotter
    the output be logged in the verify output and the verification to fail.
566 d8fff41c Guido Trotter

567 d8fff41c Guido Trotter
    """
568 d8fff41c Guido Trotter
    all_nodes = self.cfg.GetNodeList()
569 d8fff41c Guido Trotter
    # TODO: populate the environment with useful information for verify hooks
570 d8fff41c Guido Trotter
    env = {}
571 d8fff41c Guido Trotter
    return env, [], all_nodes
572 d8fff41c Guido Trotter
573 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
574 a8083063 Iustin Pop
    """Verify integrity of cluster, performing various test on nodes.
575 a8083063 Iustin Pop

576 a8083063 Iustin Pop
    """
577 a8083063 Iustin Pop
    bad = False
578 a8083063 Iustin Pop
    feedback_fn("* Verifying global settings")
579 8522ceeb Iustin Pop
    for msg in self.cfg.VerifyConfig():
580 8522ceeb Iustin Pop
      feedback_fn("  - ERROR: %s" % msg)
581 a8083063 Iustin Pop
582 a8083063 Iustin Pop
    vg_name = self.cfg.GetVGName()
583 a8083063 Iustin Pop
    nodelist = utils.NiceSort(self.cfg.GetNodeList())
584 9d4bfc96 Iustin Pop
    nodeinfo = [self.cfg.GetNodeInfo(nname) for nname in nodelist]
585 a8083063 Iustin Pop
    instancelist = utils.NiceSort(self.cfg.GetInstanceList())
586 93e4c50b Guido Trotter
    i_non_redundant = [] # Non redundant instances
587 a8083063 Iustin Pop
    node_volume = {}
588 a8083063 Iustin Pop
    node_instance = {}
589 9c9c7d30 Guido Trotter
    node_info = {}
590 26b6af5e Guido Trotter
    instance_cfg = {}
591 a8083063 Iustin Pop
592 a8083063 Iustin Pop
    # FIXME: verify OS list
593 a8083063 Iustin Pop
    # do local checksums
594 cb91d46e Iustin Pop
    file_names = list(self.sstore.GetFileList())
595 cb91d46e Iustin Pop
    file_names.append(constants.SSL_CERT_FILE)
596 cb91d46e Iustin Pop
    file_names.append(constants.CLUSTER_CONF_FILE)
597 a8083063 Iustin Pop
    local_checksums = utils.FingerprintFiles(file_names)
598 a8083063 Iustin Pop
599 a8083063 Iustin Pop
    feedback_fn("* Gathering data (%d nodes)" % len(nodelist))
600 a8083063 Iustin Pop
    all_volumeinfo = rpc.call_volume_list(nodelist, vg_name)
601 a8083063 Iustin Pop
    all_instanceinfo = rpc.call_instance_list(nodelist)
602 a8083063 Iustin Pop
    all_vglist = rpc.call_vg_list(nodelist)
603 a8083063 Iustin Pop
    node_verify_param = {
604 a8083063 Iustin Pop
      'filelist': file_names,
605 a8083063 Iustin Pop
      'nodelist': nodelist,
606 a8083063 Iustin Pop
      'hypervisor': None,
607 9d4bfc96 Iustin Pop
      'node-net-test': [(node.name, node.primary_ip, node.secondary_ip)
608 9d4bfc96 Iustin Pop
                        for node in nodeinfo]
609 a8083063 Iustin Pop
      }
610 a8083063 Iustin Pop
    all_nvinfo = rpc.call_node_verify(nodelist, node_verify_param)
611 a8083063 Iustin Pop
    all_rversion = rpc.call_version(nodelist)
612 9c9c7d30 Guido Trotter
    all_ninfo = rpc.call_node_info(nodelist, self.cfg.GetVGName())
613 a8083063 Iustin Pop
614 a8083063 Iustin Pop
    for node in nodelist:
615 a8083063 Iustin Pop
      feedback_fn("* Verifying node %s" % node)
616 a8083063 Iustin Pop
      result = self._VerifyNode(node, file_names, local_checksums,
617 a8083063 Iustin Pop
                                all_vglist[node], all_nvinfo[node],
618 a8083063 Iustin Pop
                                all_rversion[node], feedback_fn)
619 a8083063 Iustin Pop
      bad = bad or result
620 a8083063 Iustin Pop
621 a8083063 Iustin Pop
      # node_volume
622 a8083063 Iustin Pop
      volumeinfo = all_volumeinfo[node]
623 a8083063 Iustin Pop
624 b63ed789 Iustin Pop
      if isinstance(volumeinfo, basestring):
625 b63ed789 Iustin Pop
        feedback_fn("  - ERROR: LVM problem on node %s: %s" %
626 b63ed789 Iustin Pop
                    (node, volumeinfo[-400:].encode('string_escape')))
627 b63ed789 Iustin Pop
        bad = True
628 b63ed789 Iustin Pop
        node_volume[node] = {}
629 b63ed789 Iustin Pop
      elif not isinstance(volumeinfo, dict):
630 a8083063 Iustin Pop
        feedback_fn("  - ERROR: connection to %s failed" % (node,))
631 a8083063 Iustin Pop
        bad = True
632 a8083063 Iustin Pop
        continue
633 b63ed789 Iustin Pop
      else:
634 b63ed789 Iustin Pop
        node_volume[node] = volumeinfo
635 a8083063 Iustin Pop
636 a8083063 Iustin Pop
      # node_instance
637 a8083063 Iustin Pop
      nodeinstance = all_instanceinfo[node]
638 a8083063 Iustin Pop
      if type(nodeinstance) != list:
639 a8083063 Iustin Pop
        feedback_fn("  - ERROR: connection to %s failed" % (node,))
640 a8083063 Iustin Pop
        bad = True
641 a8083063 Iustin Pop
        continue
642 a8083063 Iustin Pop
643 a8083063 Iustin Pop
      node_instance[node] = nodeinstance
644 a8083063 Iustin Pop
645 9c9c7d30 Guido Trotter
      # node_info
646 9c9c7d30 Guido Trotter
      nodeinfo = all_ninfo[node]
647 9c9c7d30 Guido Trotter
      if not isinstance(nodeinfo, dict):
648 9c9c7d30 Guido Trotter
        feedback_fn("  - ERROR: connection to %s failed" % (node,))
649 9c9c7d30 Guido Trotter
        bad = True
650 9c9c7d30 Guido Trotter
        continue
651 9c9c7d30 Guido Trotter
652 9c9c7d30 Guido Trotter
      try:
653 9c9c7d30 Guido Trotter
        node_info[node] = {
654 9c9c7d30 Guido Trotter
          "mfree": int(nodeinfo['memory_free']),
655 9c9c7d30 Guido Trotter
          "dfree": int(nodeinfo['vg_free']),
656 93e4c50b Guido Trotter
          "pinst": [],
657 93e4c50b Guido Trotter
          "sinst": [],
658 36e7da50 Guido Trotter
          # dictionary holding all instances this node is secondary for,
659 36e7da50 Guido Trotter
          # grouped by their primary node. Each key is a cluster node, and each
660 36e7da50 Guido Trotter
          # value is a list of instances which have the key as primary and the
661 36e7da50 Guido Trotter
          # current node as secondary.  this is handy to calculate N+1 memory
662 36e7da50 Guido Trotter
          # availability if you can only failover from a primary to its
663 36e7da50 Guido Trotter
          # secondary.
664 36e7da50 Guido Trotter
          "sinst-by-pnode": {},
665 9c9c7d30 Guido Trotter
        }
666 9c9c7d30 Guido Trotter
      except ValueError:
667 9c9c7d30 Guido Trotter
        feedback_fn("  - ERROR: invalid value returned from node %s" % (node,))
668 9c9c7d30 Guido Trotter
        bad = True
669 9c9c7d30 Guido Trotter
        continue
670 9c9c7d30 Guido Trotter
671 a8083063 Iustin Pop
    node_vol_should = {}
672 a8083063 Iustin Pop
673 a8083063 Iustin Pop
    for instance in instancelist:
674 a8083063 Iustin Pop
      feedback_fn("* Verifying instance %s" % instance)
675 a8083063 Iustin Pop
      inst_config = self.cfg.GetInstanceInfo(instance)
676 c5705f58 Guido Trotter
      result =  self._VerifyInstance(instance, inst_config, node_volume,
677 c5705f58 Guido Trotter
                                     node_instance, feedback_fn)
678 c5705f58 Guido Trotter
      bad = bad or result
679 a8083063 Iustin Pop
680 a8083063 Iustin Pop
      inst_config.MapLVsByNode(node_vol_should)
681 a8083063 Iustin Pop
682 26b6af5e Guido Trotter
      instance_cfg[instance] = inst_config
683 26b6af5e Guido Trotter
684 93e4c50b Guido Trotter
      pnode = inst_config.primary_node
685 93e4c50b Guido Trotter
      if pnode in node_info:
686 93e4c50b Guido Trotter
        node_info[pnode]['pinst'].append(instance)
687 93e4c50b Guido Trotter
      else:
688 93e4c50b Guido Trotter
        feedback_fn("  - ERROR: instance %s, connection to primary node"
689 93e4c50b Guido Trotter
                    " %s failed" % (instance, pnode))
690 93e4c50b Guido Trotter
        bad = True
691 93e4c50b Guido Trotter
692 93e4c50b Guido Trotter
      # If the instance is non-redundant we cannot survive losing its primary
693 93e4c50b Guido Trotter
      # node, so we are not N+1 compliant. On the other hand we have no disk
694 93e4c50b Guido Trotter
      # templates with more than one secondary so that situation is not well
695 93e4c50b Guido Trotter
      # supported either.
696 93e4c50b Guido Trotter
      # FIXME: does not support file-backed instances
697 93e4c50b Guido Trotter
      if len(inst_config.secondary_nodes) == 0:
698 93e4c50b Guido Trotter
        i_non_redundant.append(instance)
699 93e4c50b Guido Trotter
      elif len(inst_config.secondary_nodes) > 1:
700 93e4c50b Guido Trotter
        feedback_fn("  - WARNING: multiple secondaries for instance %s"
701 93e4c50b Guido Trotter
                    % instance)
702 93e4c50b Guido Trotter
703 93e4c50b Guido Trotter
      for snode in inst_config.secondary_nodes:
704 93e4c50b Guido Trotter
        if snode in node_info:
705 93e4c50b Guido Trotter
          node_info[snode]['sinst'].append(instance)
706 36e7da50 Guido Trotter
          if pnode not in node_info[snode]['sinst-by-pnode']:
707 36e7da50 Guido Trotter
            node_info[snode]['sinst-by-pnode'][pnode] = []
708 36e7da50 Guido Trotter
          node_info[snode]['sinst-by-pnode'][pnode].append(instance)
709 93e4c50b Guido Trotter
        else:
710 93e4c50b Guido Trotter
          feedback_fn("  - ERROR: instance %s, connection to secondary node"
711 93e4c50b Guido Trotter
                      " %s failed" % (instance, snode))
712 93e4c50b Guido Trotter
713 a8083063 Iustin Pop
    feedback_fn("* Verifying orphan volumes")
714 a8083063 Iustin Pop
    result = self._VerifyOrphanVolumes(node_vol_should, node_volume,
715 a8083063 Iustin Pop
                                       feedback_fn)
716 a8083063 Iustin Pop
    bad = bad or result
717 a8083063 Iustin Pop
718 a8083063 Iustin Pop
    feedback_fn("* Verifying remaining instances")
719 a8083063 Iustin Pop
    result = self._VerifyOrphanInstances(instancelist, node_instance,
720 a8083063 Iustin Pop
                                         feedback_fn)
721 a8083063 Iustin Pop
    bad = bad or result
722 a8083063 Iustin Pop
723 e54c4c5e Guido Trotter
    if constants.VERIFY_NPLUSONE_MEM not in self.skip_set:
724 e54c4c5e Guido Trotter
      feedback_fn("* Verifying N+1 Memory redundancy")
725 e54c4c5e Guido Trotter
      result = self._VerifyNPlusOneMemory(node_info, instance_cfg, feedback_fn)
726 e54c4c5e Guido Trotter
      bad = bad or result
727 2b3b6ddd Guido Trotter
728 2b3b6ddd Guido Trotter
    feedback_fn("* Other Notes")
729 2b3b6ddd Guido Trotter
    if i_non_redundant:
730 2b3b6ddd Guido Trotter
      feedback_fn("  - NOTICE: %d non-redundant instance(s) found."
731 2b3b6ddd Guido Trotter
                  % len(i_non_redundant))
732 2b3b6ddd Guido Trotter
733 a8083063 Iustin Pop
    return int(bad)

  def HooksCallBack(self, phase, hooks_results, feedback_fn, lu_result):
    """Analyze the post-hooks' results, handle them, and send some
    nicely-formatted feedback back to the user.

    Args:
      phase: the hooks phase that has just been run
      hooks_results: the results of the multi-node hooks rpc call
      feedback_fn: function to send feedback back to the caller
      lu_result: previous Exec result

    """
    # We only really run POST phase hooks, and are only interested in their
    # results
    if phase == constants.HOOKS_PHASE_POST:
      # Used to change hooks' output to proper indentation
      indent_re = re.compile('^', re.M)
      feedback_fn("* Hooks Results")
      if not hooks_results:
        feedback_fn("  - ERROR: general communication failure")
        lu_result = 1
      else:
        for node_name in hooks_results:
          show_node_header = True
          res = hooks_results[node_name]
          if res is False or not isinstance(res, list):
            feedback_fn("    Communication failure")
            lu_result = 1
            continue
          for script, hkr, output in res:
            if hkr == constants.HKR_FAIL:
              # The node header is only shown once, if there are
              # failing hooks on that node
              if show_node_header:
                feedback_fn("  Node %s:" % node_name)
                show_node_header = False
              feedback_fn("    ERROR: Script %s failed, output:" % script)
              output = indent_re.sub('      ', output)
              feedback_fn("%s" % output)
              lu_result = 1

      return lu_result


class LUVerifyDisks(NoHooksLU):
  """Verifies the cluster disks status.

  """
  _OP_REQP = []

  def CheckPrereq(self):
    """Check prerequisites.

    This has no prerequisites.

    """
    pass

  def Exec(self, feedback_fn):
    """Verify integrity of cluster disks.

    """
    result = res_nodes, res_nlvm, res_instances, res_missing = [], {}, [], {}

    vg_name = self.cfg.GetVGName()
    nodes = utils.NiceSort(self.cfg.GetNodeList())
    instances = [self.cfg.GetInstanceInfo(name)
                 for name in self.cfg.GetInstanceList()]

    nv_dict = {}
    for inst in instances:
      inst_lvs = {}
      if (inst.status != "up" or
          inst.disk_template not in constants.DTS_NET_MIRROR):
        continue
      inst.MapLVsByNode(inst_lvs)
      # transform { iname: {node: [vol,],},} to {(node, vol): iname}
      for node, vol_list in inst_lvs.iteritems():
        for vol in vol_list:
          nv_dict[(node, vol)] = inst

    if not nv_dict:
      return result

    node_lvs = rpc.call_volume_list(nodes, vg_name)

    to_act = set()
    for node in nodes:
      # node_volume
      lvs = node_lvs[node]

      if isinstance(lvs, basestring):
        logger.Info("error enumerating LVs on node %s: %s" % (node, lvs))
        res_nlvm[node] = lvs
      elif not isinstance(lvs, dict):
        logger.Info("connection to node %s failed or invalid data returned" %
                    (node,))
        res_nodes.append(node)
        continue

      for lv_name, (_, lv_inactive, lv_online) in lvs.iteritems():
        inst = nv_dict.pop((node, lv_name), None)
        if (not lv_online and inst is not None
            and inst.name not in res_instances):
          res_instances.append(inst.name)

    # any leftover items in nv_dict are missing LVs, let's arrange the
    # data better
    for key, inst in nv_dict.iteritems():
      if inst.name not in res_missing:
        res_missing[inst.name] = []
      res_missing[inst.name].append(key)

    return result


class LURenameCluster(LogicalUnit):
  """Rename the cluster.

  """
  HPATH = "cluster-rename"
  HTYPE = constants.HTYPE_CLUSTER
  _OP_REQP = ["name"]
  REQ_WSSTORE = True

  def BuildHooksEnv(self):
    """Build hooks env.

    """
    env = {
      "OP_TARGET": self.sstore.GetClusterName(),
      "NEW_NAME": self.op.name,
      }
    mn = self.sstore.GetMasterNode()
    return env, [mn], [mn]

  def CheckPrereq(self):
    """Verify that the passed name is a valid one.

    """
    hostname = utils.HostInfo(self.op.name)

    new_name = hostname.name
    self.ip = new_ip = hostname.ip
    old_name = self.sstore.GetClusterName()
    old_ip = self.sstore.GetMasterIP()
    if new_name == old_name and new_ip == old_ip:
      raise errors.OpPrereqError("Neither the name nor the IP address of the"
                                 " cluster has changed")
    if new_ip != old_ip:
      if utils.TcpPing(new_ip, constants.DEFAULT_NODED_PORT):
        raise errors.OpPrereqError("The given cluster IP address (%s) is"
                                   " reachable on the network. Aborting." %
                                   new_ip)

    self.op.name = new_name

  def Exec(self, feedback_fn):
    """Rename the cluster.

    """
    clustername = self.op.name
    ip = self.ip
    ss = self.sstore

    # shutdown the master IP
    master = ss.GetMasterNode()
    if not rpc.call_node_stop_master(master):
      raise errors.OpExecError("Could not disable the master role")

    try:
      # modify the sstore
      ss.SetKey(ss.SS_MASTER_IP, ip)
      ss.SetKey(ss.SS_CLUSTER_NAME, clustername)

      # Distribute updated ss config to all nodes
      myself = self.cfg.GetNodeInfo(master)
      dist_nodes = self.cfg.GetNodeList()
      if myself.name in dist_nodes:
        dist_nodes.remove(myself.name)

      logger.Debug("Copying updated ssconf data to all nodes")
      for keyname in [ss.SS_CLUSTER_NAME, ss.SS_MASTER_IP]:
        fname = ss.KeyToFilename(keyname)
        result = rpc.call_upload_file(dist_nodes, fname)
        for to_node in dist_nodes:
          if not result[to_node]:
            logger.Error("copy of file %s to node %s failed" %
                         (fname, to_node))
    finally:
      if not rpc.call_node_start_master(master):
        logger.Error("Could not re-enable the master role on the master,"
                     " please restart manually.")
                     " please restart manually.")
926 07bd8a51 Iustin Pop
927 07bd8a51 Iustin Pop
928 8084f9f6 Manuel Franceschini
def _RecursiveCheckIfLVMBased(disk):
929 8084f9f6 Manuel Franceschini
  """Check if the given disk or its children are lvm-based.
930 8084f9f6 Manuel Franceschini

931 8084f9f6 Manuel Franceschini
  Args:
932 8084f9f6 Manuel Franceschini
    disk: ganeti.objects.Disk object
933 8084f9f6 Manuel Franceschini

934 8084f9f6 Manuel Franceschini
  Returns:
935 8084f9f6 Manuel Franceschini
    boolean indicating whether a LD_LV dev_type was found or not
936 8084f9f6 Manuel Franceschini

937 8084f9f6 Manuel Franceschini
  """
938 8084f9f6 Manuel Franceschini
  if disk.children:
939 8084f9f6 Manuel Franceschini
    for chdisk in disk.children:
940 8084f9f6 Manuel Franceschini
      if _RecursiveCheckIfLVMBased(chdisk):
941 8084f9f6 Manuel Franceschini
        return True
942 8084f9f6 Manuel Franceschini
  return disk.dev_type == constants.LD_LV
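
# Illustrative note: for a hypothetical mirrored disk object whose two
# children are LD_LV logical volumes, the recursion above finds an LV
# child and returns True; for a disk tree with no LD_LV device anywhere,
# it falls through to the final dev_type comparison and returns False.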
943 8084f9f6 Manuel Franceschini
944 8084f9f6 Manuel Franceschini
945 8084f9f6 Manuel Franceschini
class LUSetClusterParams(LogicalUnit):
946 8084f9f6 Manuel Franceschini
  """Change the parameters of the cluster.
947 8084f9f6 Manuel Franceschini

948 8084f9f6 Manuel Franceschini
  """
949 8084f9f6 Manuel Franceschini
  HPATH = "cluster-modify"
950 8084f9f6 Manuel Franceschini
  HTYPE = constants.HTYPE_CLUSTER
951 8084f9f6 Manuel Franceschini
  _OP_REQP = []
952 8084f9f6 Manuel Franceschini
953 8084f9f6 Manuel Franceschini
  def BuildHooksEnv(self):
954 8084f9f6 Manuel Franceschini
    """Build hooks env.
955 8084f9f6 Manuel Franceschini

956 8084f9f6 Manuel Franceschini
    """
957 8084f9f6 Manuel Franceschini
    env = {
958 8084f9f6 Manuel Franceschini
      "OP_TARGET": self.sstore.GetClusterName(),
959 8084f9f6 Manuel Franceschini
      "NEW_VG_NAME": self.op.vg_name,
960 8084f9f6 Manuel Franceschini
      }
961 8084f9f6 Manuel Franceschini
    mn = self.sstore.GetMasterNode()
962 8084f9f6 Manuel Franceschini
    return env, [mn], [mn]
963 8084f9f6 Manuel Franceschini
964 8084f9f6 Manuel Franceschini
  def CheckPrereq(self):
965 8084f9f6 Manuel Franceschini
    """Check prerequisites.
966 8084f9f6 Manuel Franceschini

967 8084f9f6 Manuel Franceschini
    This checks whether the given params don't conflict and
968 5f83e263 Iustin Pop
    if the given volume group is valid.
969 8084f9f6 Manuel Franceschini

970 8084f9f6 Manuel Franceschini
    """
971 8084f9f6 Manuel Franceschini
    if not self.op.vg_name:
972 8084f9f6 Manuel Franceschini
      instances = [self.cfg.GetInstanceInfo(name)
973 8084f9f6 Manuel Franceschini
                   for name in self.cfg.GetInstanceList()]
974 8084f9f6 Manuel Franceschini
      for inst in instances:
975 8084f9f6 Manuel Franceschini
        for disk in inst.disks:
976 8084f9f6 Manuel Franceschini
          if _RecursiveCheckIfLVMBased(disk):
977 8084f9f6 Manuel Franceschini
            raise errors.OpPrereqError("Cannot disable lvm storage while"
978 8084f9f6 Manuel Franceschini
                                       " lvm-based instances exist")
979 8084f9f6 Manuel Franceschini
980 8084f9f6 Manuel Franceschini
    # if vg_name not None, checks given volume group on all nodes
981 8084f9f6 Manuel Franceschini
    if self.op.vg_name:
982 8084f9f6 Manuel Franceschini
      node_list = self.cfg.GetNodeList()
983 8084f9f6 Manuel Franceschini
      vglist = rpc.call_vg_list(node_list)
984 8084f9f6 Manuel Franceschini
      for node in node_list:
985 8d1a2a64 Michael Hanselmann
        vgstatus = utils.CheckVolumeGroupSize(vglist[node], self.op.vg_name,
986 8d1a2a64 Michael Hanselmann
                                              constants.MIN_VG_SIZE)
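        # CheckVolumeGroupSize returns a false value (None) on success and
        # an error description string otherwise, used verbatim below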
987 8084f9f6 Manuel Franceschini
        if vgstatus:
988 8084f9f6 Manuel Franceschini
          raise errors.OpPrereqError("Error on node '%s': %s" %
989 8084f9f6 Manuel Franceschini
                                     (node, vgstatus))
990 8084f9f6 Manuel Franceschini
991 8084f9f6 Manuel Franceschini
  def Exec(self, feedback_fn):
992 8084f9f6 Manuel Franceschini
    """Change the parameters of the cluster.
993 8084f9f6 Manuel Franceschini

994 8084f9f6 Manuel Franceschini
    """
995 8084f9f6 Manuel Franceschini
    if self.op.vg_name != self.cfg.GetVGName():
996 8084f9f6 Manuel Franceschini
      self.cfg.SetVGName(self.op.vg_name)
997 8084f9f6 Manuel Franceschini
    else:
998 8084f9f6 Manuel Franceschini
      feedback_fn("Cluster LVM configuration already in desired"
999 8084f9f6 Manuel Franceschini
                  " state, not changing")
1000 8084f9f6 Manuel Franceschini
1001 8084f9f6 Manuel Franceschini
1002 5bfac263 Iustin Pop
def _WaitForSync(cfgw, instance, proc, oneshot=False, unlock=False):
1003 a8083063 Iustin Pop
  """Sleep and poll for an instance's disk to sync.
1004 a8083063 Iustin Pop

1005 a8083063 Iustin Pop
  """
1006 a8083063 Iustin Pop
  if not instance.disks:
1007 a8083063 Iustin Pop
    return True
1008 a8083063 Iustin Pop
1009 a8083063 Iustin Pop
  if not oneshot:
1010 5bfac263 Iustin Pop
    proc.LogInfo("Waiting for instance %s to sync disks." % instance.name)
1011 a8083063 Iustin Pop
1012 a8083063 Iustin Pop
  node = instance.primary_node
1013 a8083063 Iustin Pop
1014 a8083063 Iustin Pop
  for dev in instance.disks:
1015 a8083063 Iustin Pop
    cfgw.SetDiskID(dev, node)
1016 a8083063 Iustin Pop
1017 a8083063 Iustin Pop
  retries = 0
1018 a8083063 Iustin Pop
  while True:
1019 a8083063 Iustin Pop
    max_time = 0
1020 a8083063 Iustin Pop
    done = True
1021 a8083063 Iustin Pop
    cumul_degraded = False
1022 a8083063 Iustin Pop
    rstats = rpc.call_blockdev_getmirrorstatus(node, instance.disks)
1023 a8083063 Iustin Pop
    if not rstats:
1024 5bfac263 Iustin Pop
      proc.LogWarning("Can't get any data from node %s" % node)
1025 a8083063 Iustin Pop
      retries += 1
1026 a8083063 Iustin Pop
      if retries >= 10:
1027 3ecf6786 Iustin Pop
        raise errors.RemoteError("Can't contact node %s for mirror data,"
1028 3ecf6786 Iustin Pop
                                 " aborting." % node)
1029 a8083063 Iustin Pop
      time.sleep(6)
1030 a8083063 Iustin Pop
      continue
1031 a8083063 Iustin Pop
    retries = 0
1032 a8083063 Iustin Pop
    for i in range(len(rstats)):
1033 a8083063 Iustin Pop
      mstat = rstats[i]
1034 a8083063 Iustin Pop
      if mstat is None:
1035 5bfac263 Iustin Pop
        proc.LogWarning("Can't compute data for node %s/%s" %
1036 a8083063 Iustin Pop
                        (node, instance.disks[i].iv_name))
1037 a8083063 Iustin Pop
        continue
1038 0834c866 Iustin Pop
      # we ignore the ldisk parameter
1039 0834c866 Iustin Pop
      perc_done, est_time, is_degraded, _ = mstat
1040 a8083063 Iustin Pop
      cumul_degraded = cumul_degraded or (is_degraded and perc_done is None)
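      # a None perc_done means no resync is in progress for this device;
      # a numeric value means the mirror is still catching up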
1041 a8083063 Iustin Pop
      if perc_done is not None:
1042 a8083063 Iustin Pop
        done = False
1043 a8083063 Iustin Pop
        if est_time is not None:
1044 a8083063 Iustin Pop
          rem_time = "%d estimated seconds remaining" % est_time
1045 a8083063 Iustin Pop
          max_time = est_time
1046 a8083063 Iustin Pop
        else:
1047 a8083063 Iustin Pop
          rem_time = "no time estimate"
1048 5bfac263 Iustin Pop
        proc.LogInfo("- device %s: %5.2f%% done, %s" %
1049 5bfac263 Iustin Pop
                     (instance.disks[i].iv_name, perc_done, rem_time))
1050 a8083063 Iustin Pop
    if done or oneshot:
1051 a8083063 Iustin Pop
      break
1052 a8083063 Iustin Pop
1053 a8083063 Iustin Pop
    if unlock:
1054 685ee993 Iustin Pop
      #utils.Unlock('cmd')
1055 685ee993 Iustin Pop
      pass
1056 a8083063 Iustin Pop
    try:
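      # poll again after at most 60 seconds, or sooner if the largest
      # remaining sync estimate reported above is smaller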
1057 a8083063 Iustin Pop
      time.sleep(min(60, max_time))
1058 a8083063 Iustin Pop
    finally:
1059 a8083063 Iustin Pop
      if unlock:
1060 685ee993 Iustin Pop
        #utils.Lock('cmd')
1061 685ee993 Iustin Pop
        pass
1062 a8083063 Iustin Pop
1063 a8083063 Iustin Pop
  if done:
1064 5bfac263 Iustin Pop
    proc.LogInfo("Instance %s's disks are in sync." % instance.name)
1065 a8083063 Iustin Pop
  return not cumul_degraded
1066 a8083063 Iustin Pop
1067 a8083063 Iustin Pop
1068 0834c866 Iustin Pop
def _CheckDiskConsistency(cfgw, dev, node, on_primary, ldisk=False):
1069 a8083063 Iustin Pop
  """Check that mirrors are not degraded.
1070 a8083063 Iustin Pop

1071 0834c866 Iustin Pop
  The ldisk parameter, if True, will change the test from the
1072 0834c866 Iustin Pop
  is_degraded attribute (which represents overall non-ok status for
1073 0834c866 Iustin Pop
  the device(s)) to the ldisk (representing the local storage status).
1074 0834c866 Iustin Pop

1075 a8083063 Iustin Pop
  """
1076 a8083063 Iustin Pop
  cfgw.SetDiskID(dev, node)
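  # rpc.call_blockdev_find returns a status tuple; index 5 holds the
  # overall is_degraded flag and index 6 the local disk (ldisk) status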
1077 0834c866 Iustin Pop
  if ldisk:
1078 0834c866 Iustin Pop
    idx = 6
1079 0834c866 Iustin Pop
  else:
1080 0834c866 Iustin Pop
    idx = 5
1081 a8083063 Iustin Pop
1082 a8083063 Iustin Pop
  result = True
1083 a8083063 Iustin Pop
  if on_primary or dev.AssembleOnSecondary():
1084 a8083063 Iustin Pop
    rstats = rpc.call_blockdev_find(node, dev)
1085 a8083063 Iustin Pop
    if not rstats:
1086 aa9d0c32 Guido Trotter
      logger.ToStderr("Node %s: Disk degraded, not found or node down" % node)
1087 a8083063 Iustin Pop
      result = False
1088 a8083063 Iustin Pop
    else:
1089 0834c866 Iustin Pop
      result = result and (not rstats[idx])
1090 a8083063 Iustin Pop
  if dev.children:
1091 a8083063 Iustin Pop
    for child in dev.children:
1092 a8083063 Iustin Pop
      result = result and _CheckDiskConsistency(cfgw, child, node, on_primary)
1093 a8083063 Iustin Pop
1094 a8083063 Iustin Pop
  return result
1095 a8083063 Iustin Pop
1096 a8083063 Iustin Pop
1097 a8083063 Iustin Pop
class LUDiagnoseOS(NoHooksLU):
1098 a8083063 Iustin Pop
  """Logical unit for OS diagnose/query.
1099 a8083063 Iustin Pop

1100 a8083063 Iustin Pop
  """
1101 1f9430d6 Iustin Pop
  _OP_REQP = ["output_fields", "names"]
1102 a8083063 Iustin Pop
1103 a8083063 Iustin Pop
  def CheckPrereq(self):
1104 a8083063 Iustin Pop
    """Check prerequisites.
1105 a8083063 Iustin Pop

1106 a8083063 Iustin Pop
    This always succeeds, since this is a pure query LU.
1107 a8083063 Iustin Pop

1108 a8083063 Iustin Pop
    """
1109 1f9430d6 Iustin Pop
    if self.op.names:
1110 1f9430d6 Iustin Pop
      raise errors.OpPrereqError("Selective OS query not supported")
1111 1f9430d6 Iustin Pop
1112 1f9430d6 Iustin Pop
    self.dynamic_fields = frozenset(["name", "valid", "node_status"])
1113 1f9430d6 Iustin Pop
    _CheckOutputFields(static=[],
1114 1f9430d6 Iustin Pop
                       dynamic=self.dynamic_fields,
1115 1f9430d6 Iustin Pop
                       selected=self.op.output_fields)
1116 1f9430d6 Iustin Pop
1117 1f9430d6 Iustin Pop
  @staticmethod
1118 1f9430d6 Iustin Pop
  def _DiagnoseByOS(node_list, rlist):
1119 1f9430d6 Iustin Pop
    """Remaps a per-node return list into an a per-os per-node dictionary
1120 1f9430d6 Iustin Pop

1121 1f9430d6 Iustin Pop
      Args:
1122 1f9430d6 Iustin Pop
        node_list: a list with the names of all nodes
1123 1f9430d6 Iustin Pop
        rlist: a map with node names as keys and lists of OS objects
               as values
1124 1f9430d6 Iustin Pop

1125 1f9430d6 Iustin Pop
      Returns:
1126 1f9430d6 Iustin Pop
        map: a map with osnames as keys and as value another map, with
1127 1f9430d6 Iustin Pop
             nodes as keys and lists of OS objects as values
1129 1f9430d6 Iustin Pop
             e.g. {"debian-etch": {"node1": [<object>,...],
1130 1f9430d6 Iustin Pop
                                   "node2": [<object>,]}
1131 1f9430d6 Iustin Pop
                  }
1132 1f9430d6 Iustin Pop

1133 1f9430d6 Iustin Pop
    """
1134 1f9430d6 Iustin Pop
    all_os = {}
1135 1f9430d6 Iustin Pop
    for node_name, nr in rlist.iteritems():
1136 1f9430d6 Iustin Pop
      if not nr:
1137 1f9430d6 Iustin Pop
        continue
1138 b4de68a9 Iustin Pop
      for os_obj in nr:
1139 b4de68a9 Iustin Pop
        if os_obj.name not in all_os:
1140 1f9430d6 Iustin Pop
          # build a list of nodes for this os containing empty lists
1141 1f9430d6 Iustin Pop
          # for each node in node_list
1142 b4de68a9 Iustin Pop
          all_os[os_obj.name] = {}
1143 1f9430d6 Iustin Pop
          for nname in node_list:
1144 b4de68a9 Iustin Pop
            all_os[os_obj.name][nname] = []
1145 b4de68a9 Iustin Pop
        all_os[os_obj.name][node_name].append(os_obj)
1146 1f9430d6 Iustin Pop
    return all_os
1147 a8083063 Iustin Pop
1148 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
1149 a8083063 Iustin Pop
    """Compute the list of OSes.
1150 a8083063 Iustin Pop

1151 a8083063 Iustin Pop
    """
1152 a8083063 Iustin Pop
    node_list = self.cfg.GetNodeList()
1153 a8083063 Iustin Pop
    node_data = rpc.call_os_diagnose(node_list)
1154 a8083063 Iustin Pop
    if node_data == False:
1155 3ecf6786 Iustin Pop
      raise errors.OpExecError("Can't gather the list of OSes")
1156 1f9430d6 Iustin Pop
    pol = self._DiagnoseByOS(node_list, node_data)
1157 1f9430d6 Iustin Pop
    output = []
1158 1f9430d6 Iustin Pop
    for os_name, os_data in pol.iteritems():
1159 1f9430d6 Iustin Pop
      row = []
1160 1f9430d6 Iustin Pop
      for field in self.op.output_fields:
1161 1f9430d6 Iustin Pop
        if field == "name":
1162 1f9430d6 Iustin Pop
          val = os_name
1163 1f9430d6 Iustin Pop
        elif field == "valid":
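          # an OS is reported valid only if every node returned at least
          # one (truthy) OS object for it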
1164 1f9430d6 Iustin Pop
          val = utils.all([osl and osl[0] for osl in os_data.values()])
1165 1f9430d6 Iustin Pop
        elif field == "node_status":
1166 1f9430d6 Iustin Pop
          val = {}
1167 1f9430d6 Iustin Pop
          for node_name, nos_list in os_data.iteritems():
1168 1f9430d6 Iustin Pop
            val[node_name] = [(v.status, v.path) for v in nos_list]
1169 1f9430d6 Iustin Pop
        else:
1170 1f9430d6 Iustin Pop
          raise errors.ParameterError(field)
1171 1f9430d6 Iustin Pop
        row.append(val)
1172 1f9430d6 Iustin Pop
      output.append(row)
1173 1f9430d6 Iustin Pop
1174 1f9430d6 Iustin Pop
    return output
1175 a8083063 Iustin Pop
1176 a8083063 Iustin Pop
1177 a8083063 Iustin Pop
class LURemoveNode(LogicalUnit):
1178 a8083063 Iustin Pop
  """Logical unit for removing a node.
1179 a8083063 Iustin Pop

1180 a8083063 Iustin Pop
  """
1181 a8083063 Iustin Pop
  HPATH = "node-remove"
1182 a8083063 Iustin Pop
  HTYPE = constants.HTYPE_NODE
1183 a8083063 Iustin Pop
  _OP_REQP = ["node_name"]
1184 a8083063 Iustin Pop
1185 a8083063 Iustin Pop
  def BuildHooksEnv(self):
1186 a8083063 Iustin Pop
    """Build hooks env.
1187 a8083063 Iustin Pop

1188 a8083063 Iustin Pop
    This doesn't run on the target node in the pre phase as a failed
1189 d08869ee Guido Trotter
    node would then be impossible to remove.
1190 a8083063 Iustin Pop

1191 a8083063 Iustin Pop
    """
1192 396e1b78 Michael Hanselmann
    env = {
1193 0e137c28 Iustin Pop
      "OP_TARGET": self.op.node_name,
1194 396e1b78 Michael Hanselmann
      "NODE_NAME": self.op.node_name,
1195 396e1b78 Michael Hanselmann
      }
1196 a8083063 Iustin Pop
    all_nodes = self.cfg.GetNodeList()
1197 a8083063 Iustin Pop
    all_nodes.remove(self.op.node_name)
1198 396e1b78 Michael Hanselmann
    return env, all_nodes, all_nodes
1199 a8083063 Iustin Pop
1200 a8083063 Iustin Pop
  def CheckPrereq(self):
1201 a8083063 Iustin Pop
    """Check prerequisites.
1202 a8083063 Iustin Pop

1203 a8083063 Iustin Pop
    This checks:
1204 a8083063 Iustin Pop
     - the node exists in the configuration
1205 a8083063 Iustin Pop
     - it does not have primary or secondary instances
1206 a8083063 Iustin Pop
     - it's not the master
1207 a8083063 Iustin Pop

1208 a8083063 Iustin Pop
    Any errors are signalled by raising errors.OpPrereqError.
1209 a8083063 Iustin Pop

1210 a8083063 Iustin Pop
    """
1211 a8083063 Iustin Pop
    node = self.cfg.GetNodeInfo(self.cfg.ExpandNodeName(self.op.node_name))
1212 a8083063 Iustin Pop
    if node is None:
1213 a02bc76e Iustin Pop
      raise errors.OpPrereqError("Node '%s' is unknown." % self.op.node_name)
1214 a8083063 Iustin Pop
1215 a8083063 Iustin Pop
    instance_list = self.cfg.GetInstanceList()
1216 a8083063 Iustin Pop
1217 880478f8 Iustin Pop
    masternode = self.sstore.GetMasterNode()
1218 a8083063 Iustin Pop
    if node.name == masternode:
1219 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("Node is the master node,"
1220 3ecf6786 Iustin Pop
                                 " you need to failover first.")
1221 a8083063 Iustin Pop
1222 a8083063 Iustin Pop
    for instance_name in instance_list:
1223 a8083063 Iustin Pop
      instance = self.cfg.GetInstanceInfo(instance_name)
1224 a8083063 Iustin Pop
      if node.name == instance.primary_node:
1225 3ecf6786 Iustin Pop
        raise errors.OpPrereqError("Instance %s still running on the node,"
1226 3ecf6786 Iustin Pop
                                   " please remove first." % instance_name)
1227 a8083063 Iustin Pop
      if node.name in instance.secondary_nodes:
1228 3ecf6786 Iustin Pop
        raise errors.OpPrereqError("Instance %s has node as a secondary,"
1229 3ecf6786 Iustin Pop
                                   " please remove first." % instance_name)
1230 a8083063 Iustin Pop
    self.op.node_name = node.name
1231 a8083063 Iustin Pop
    self.node = node
1232 a8083063 Iustin Pop
1233 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
1234 a8083063 Iustin Pop
    """Removes the node from the cluster.
1235 a8083063 Iustin Pop

1236 a8083063 Iustin Pop
    """
1237 a8083063 Iustin Pop
    node = self.node
1238 a8083063 Iustin Pop
    logger.Info("stopping the node daemon and removing configs from node %s" %
1239 a8083063 Iustin Pop
                node.name)
1240 a8083063 Iustin Pop
1241 a8083063 Iustin Pop
    rpc.call_node_leave_cluster(node.name)
1242 a8083063 Iustin Pop
1243 a8083063 Iustin Pop
    logger.Info("Removing node %s from config" % node.name)
1244 a8083063 Iustin Pop
1245 a8083063 Iustin Pop
    self.cfg.RemoveNode(node.name)
1246 a8083063 Iustin Pop
1247 d9c02ca6 Michael Hanselmann
    utils.RemoveHostFromEtcHosts(node.name)
1248 c8a0948f Michael Hanselmann
1249 a8083063 Iustin Pop
1250 a8083063 Iustin Pop
class LUQueryNodes(NoHooksLU):
1251 a8083063 Iustin Pop
  """Logical unit for querying nodes.
1252 a8083063 Iustin Pop

1253 a8083063 Iustin Pop
  """
1254 246e180a Iustin Pop
  _OP_REQP = ["output_fields", "names"]
1255 a8083063 Iustin Pop
1256 a8083063 Iustin Pop
  def CheckPrereq(self):
1257 a8083063 Iustin Pop
    """Check prerequisites.
1258 a8083063 Iustin Pop

1259 a8083063 Iustin Pop
    This checks that the fields required are valid output fields.
1260 a8083063 Iustin Pop

1261 a8083063 Iustin Pop
    """
1262 e8a4c138 Iustin Pop
    self.dynamic_fields = frozenset([
1263 e8a4c138 Iustin Pop
      "dtotal", "dfree",
1264 e8a4c138 Iustin Pop
      "mtotal", "mnode", "mfree",
1265 e8a4c138 Iustin Pop
      "bootid",
1266 e8a4c138 Iustin Pop
      "ctotal",
1267 e8a4c138 Iustin Pop
      ])
1268 a8083063 Iustin Pop
1269 ec223efb Iustin Pop
    _CheckOutputFields(static=["name", "pinst_cnt", "sinst_cnt",
1270 ec223efb Iustin Pop
                               "pinst_list", "sinst_list",
1271 130a6a6f Iustin Pop
                               "pip", "sip", "tags"],
1272 dcb93971 Michael Hanselmann
                       dynamic=self.dynamic_fields,
1273 dcb93971 Michael Hanselmann
                       selected=self.op.output_fields)
1274 a8083063 Iustin Pop
1275 246e180a Iustin Pop
    self.wanted = _GetWantedNodes(self, self.op.names)
1276 a8083063 Iustin Pop
1277 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
1278 a8083063 Iustin Pop
    """Computes the list of nodes and their attributes.
1279 a8083063 Iustin Pop

1280 a8083063 Iustin Pop
    """
1281 246e180a Iustin Pop
    nodenames = self.wanted
1282 a8083063 Iustin Pop
    nodelist = [self.cfg.GetNodeInfo(name) for name in nodenames]
1283 a8083063 Iustin Pop
1284 a8083063 Iustin Pop
    # begin data gathering
1285 a8083063 Iustin Pop
1286 a8083063 Iustin Pop
    if self.dynamic_fields.intersection(self.op.output_fields):
1287 a8083063 Iustin Pop
      live_data = {}
1288 a8083063 Iustin Pop
      node_data = rpc.call_node_info(nodenames, self.cfg.GetVGName())
1289 a8083063 Iustin Pop
      for name in nodenames:
1290 a8083063 Iustin Pop
        nodeinfo = node_data.get(name, None)
1291 a8083063 Iustin Pop
        if nodeinfo:
1292 a8083063 Iustin Pop
          live_data[name] = {
1293 a8083063 Iustin Pop
            "mtotal": utils.TryConvert(int, nodeinfo['memory_total']),
1294 a8083063 Iustin Pop
            "mnode": utils.TryConvert(int, nodeinfo['memory_dom0']),
1295 a8083063 Iustin Pop
            "mfree": utils.TryConvert(int, nodeinfo['memory_free']),
1296 a8083063 Iustin Pop
            "dtotal": utils.TryConvert(int, nodeinfo['vg_size']),
1297 a8083063 Iustin Pop
            "dfree": utils.TryConvert(int, nodeinfo['vg_free']),
1298 e8a4c138 Iustin Pop
            "ctotal": utils.TryConvert(int, nodeinfo['cpu_total']),
1299 3ef10550 Michael Hanselmann
            "bootid": nodeinfo['bootid'],
1300 a8083063 Iustin Pop
            }
1301 a8083063 Iustin Pop
        else:
1302 a8083063 Iustin Pop
          live_data[name] = {}
1303 a8083063 Iustin Pop
    else:
1304 a8083063 Iustin Pop
      live_data = dict.fromkeys(nodenames, {})
1305 a8083063 Iustin Pop
1306 ec223efb Iustin Pop
    node_to_primary = dict([(name, set()) for name in nodenames])
1307 ec223efb Iustin Pop
    node_to_secondary = dict([(name, set()) for name in nodenames])
1308 a8083063 Iustin Pop
1309 ec223efb Iustin Pop
    inst_fields = frozenset(("pinst_cnt", "pinst_list",
1310 ec223efb Iustin Pop
                             "sinst_cnt", "sinst_list"))
1311 ec223efb Iustin Pop
    if inst_fields & frozenset(self.op.output_fields):
1312 a8083063 Iustin Pop
      instancelist = self.cfg.GetInstanceList()
1313 a8083063 Iustin Pop
1314 ec223efb Iustin Pop
      for instance_name in instancelist:
1315 ec223efb Iustin Pop
        inst = self.cfg.GetInstanceInfo(instance_name)
1316 ec223efb Iustin Pop
        if inst.primary_node in node_to_primary:
1317 ec223efb Iustin Pop
          node_to_primary[inst.primary_node].add(inst.name)
1318 ec223efb Iustin Pop
        for secnode in inst.secondary_nodes:
1319 ec223efb Iustin Pop
          if secnode in node_to_secondary:
1320 ec223efb Iustin Pop
            node_to_secondary[secnode].add(inst.name)
1321 a8083063 Iustin Pop
1322 a8083063 Iustin Pop
    # end data gathering
1323 a8083063 Iustin Pop
1324 a8083063 Iustin Pop
    output = []
1325 a8083063 Iustin Pop
    for node in nodelist:
1326 a8083063 Iustin Pop
      node_output = []
1327 a8083063 Iustin Pop
      for field in self.op.output_fields:
1328 a8083063 Iustin Pop
        if field == "name":
1329 a8083063 Iustin Pop
          val = node.name
1330 ec223efb Iustin Pop
        elif field == "pinst_list":
1331 ec223efb Iustin Pop
          val = list(node_to_primary[node.name])
1332 ec223efb Iustin Pop
        elif field == "sinst_list":
1333 ec223efb Iustin Pop
          val = list(node_to_secondary[node.name])
1334 ec223efb Iustin Pop
        elif field == "pinst_cnt":
1335 ec223efb Iustin Pop
          val = len(node_to_primary[node.name])
1336 ec223efb Iustin Pop
        elif field == "sinst_cnt":
1337 ec223efb Iustin Pop
          val = len(node_to_secondary[node.name])
1338 a8083063 Iustin Pop
        elif field == "pip":
1339 a8083063 Iustin Pop
          val = node.primary_ip
1340 a8083063 Iustin Pop
        elif field == "sip":
1341 a8083063 Iustin Pop
          val = node.secondary_ip
1342 130a6a6f Iustin Pop
        elif field == "tags":
1343 130a6a6f Iustin Pop
          val = list(node.GetTags())
1344 a8083063 Iustin Pop
        elif field in self.dynamic_fields:
1345 ec223efb Iustin Pop
          val = live_data[node.name].get(field, None)
1346 a8083063 Iustin Pop
        else:
1347 3ecf6786 Iustin Pop
          raise errors.ParameterError(field)
1348 a8083063 Iustin Pop
        node_output.append(val)
1349 a8083063 Iustin Pop
      output.append(node_output)
1350 a8083063 Iustin Pop
1351 a8083063 Iustin Pop
    return output
1352 a8083063 Iustin Pop
1353 a8083063 Iustin Pop
1354 dcb93971 Michael Hanselmann
class LUQueryNodeVolumes(NoHooksLU):
1355 dcb93971 Michael Hanselmann
  """Logical unit for getting volumes on node(s).
1356 dcb93971 Michael Hanselmann

1357 dcb93971 Michael Hanselmann
  """
1358 dcb93971 Michael Hanselmann
  _OP_REQP = ["nodes", "output_fields"]
1359 dcb93971 Michael Hanselmann
1360 dcb93971 Michael Hanselmann
  def CheckPrereq(self):
1361 dcb93971 Michael Hanselmann
    """Check prerequisites.
1362 dcb93971 Michael Hanselmann

1363 dcb93971 Michael Hanselmann
    This checks that the fields required are valid output fields.
1364 dcb93971 Michael Hanselmann

1365 dcb93971 Michael Hanselmann
    """
1366 dcb93971 Michael Hanselmann
    self.nodes = _GetWantedNodes(self, self.op.nodes)
1367 dcb93971 Michael Hanselmann
1368 dcb93971 Michael Hanselmann
    _CheckOutputFields(static=["node"],
1369 dcb93971 Michael Hanselmann
                       dynamic=["phys", "vg", "name", "size", "instance"],
1370 dcb93971 Michael Hanselmann
                       selected=self.op.output_fields)
1371 dcb93971 Michael Hanselmann
1372 dcb93971 Michael Hanselmann
1373 dcb93971 Michael Hanselmann
  def Exec(self, feedback_fn):
1374 dcb93971 Michael Hanselmann
    """Computes the list of nodes and their attributes.
1375 dcb93971 Michael Hanselmann

1376 dcb93971 Michael Hanselmann
    """
1377 a7ba5e53 Iustin Pop
    nodenames = self.nodes
1378 dcb93971 Michael Hanselmann
    volumes = rpc.call_node_volumes(nodenames)
1379 dcb93971 Michael Hanselmann
1380 dcb93971 Michael Hanselmann
    ilist = [self.cfg.GetInstanceInfo(iname) for iname
1381 dcb93971 Michael Hanselmann
             in self.cfg.GetInstanceList()]
1382 dcb93971 Michael Hanselmann
1383 dcb93971 Michael Hanselmann
    lv_by_node = dict([(inst, inst.MapLVsByNode()) for inst in ilist])
1384 dcb93971 Michael Hanselmann
1385 dcb93971 Michael Hanselmann
    output = []
1386 dcb93971 Michael Hanselmann
    for node in nodenames:
1387 37d19eb2 Michael Hanselmann
      if node not in volumes or not volumes[node]:
1388 37d19eb2 Michael Hanselmann
        continue
1389 37d19eb2 Michael Hanselmann
1390 dcb93971 Michael Hanselmann
      node_vols = volumes[node][:]
1391 dcb93971 Michael Hanselmann
      node_vols.sort(key=lambda vol: vol['dev'])
1392 dcb93971 Michael Hanselmann
1393 dcb93971 Michael Hanselmann
      for vol in node_vols:
1394 dcb93971 Michael Hanselmann
        node_output = []
1395 dcb93971 Michael Hanselmann
        for field in self.op.output_fields:
1396 dcb93971 Michael Hanselmann
          if field == "node":
1397 dcb93971 Michael Hanselmann
            val = node
1398 dcb93971 Michael Hanselmann
          elif field == "phys":
1399 dcb93971 Michael Hanselmann
            val = vol['dev']
1400 dcb93971 Michael Hanselmann
          elif field == "vg":
1401 dcb93971 Michael Hanselmann
            val = vol['vg']
1402 dcb93971 Michael Hanselmann
          elif field == "name":
1403 dcb93971 Michael Hanselmann
            val = vol['name']
1404 dcb93971 Michael Hanselmann
          elif field == "size":
1405 dcb93971 Michael Hanselmann
            val = int(float(vol['size']))
1406 dcb93971 Michael Hanselmann
          elif field == "instance":
1407 dcb93971 Michael Hanselmann
            for inst in ilist:
1408 dcb93971 Michael Hanselmann
              if node not in lv_by_node[inst]:
1409 dcb93971 Michael Hanselmann
                continue
1410 dcb93971 Michael Hanselmann
              if vol['name'] in lv_by_node[inst][node]:
1411 dcb93971 Michael Hanselmann
                val = inst.name
1412 dcb93971 Michael Hanselmann
                break
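            # for/else: the else branch runs only when no instance claimed
            # this volume (the loop finished without hitting break)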
1413 dcb93971 Michael Hanselmann
            else:
1414 dcb93971 Michael Hanselmann
              val = '-'
1415 dcb93971 Michael Hanselmann
          else:
1416 3ecf6786 Iustin Pop
            raise errors.ParameterError(field)
1417 dcb93971 Michael Hanselmann
          node_output.append(str(val))
1418 dcb93971 Michael Hanselmann
1419 dcb93971 Michael Hanselmann
        output.append(node_output)
1420 dcb93971 Michael Hanselmann
1421 dcb93971 Michael Hanselmann
    return output
1422 dcb93971 Michael Hanselmann
1423 dcb93971 Michael Hanselmann
1424 a8083063 Iustin Pop
class LUAddNode(LogicalUnit):
1425 a8083063 Iustin Pop
  """Logical unit for adding node to the cluster.
1426 a8083063 Iustin Pop

1427 a8083063 Iustin Pop
  """
1428 a8083063 Iustin Pop
  HPATH = "node-add"
1429 a8083063 Iustin Pop
  HTYPE = constants.HTYPE_NODE
1430 a8083063 Iustin Pop
  _OP_REQP = ["node_name"]
1431 a8083063 Iustin Pop
1432 a8083063 Iustin Pop
  def BuildHooksEnv(self):
1433 a8083063 Iustin Pop
    """Build hooks env.
1434 a8083063 Iustin Pop

1435 a8083063 Iustin Pop
    This will run on all nodes before, and on all nodes + the new node after.
1436 a8083063 Iustin Pop

1437 a8083063 Iustin Pop
    """
1438 a8083063 Iustin Pop
    env = {
1439 0e137c28 Iustin Pop
      "OP_TARGET": self.op.node_name,
1440 a8083063 Iustin Pop
      "NODE_NAME": self.op.node_name,
1441 a8083063 Iustin Pop
      "NODE_PIP": self.op.primary_ip,
1442 a8083063 Iustin Pop
      "NODE_SIP": self.op.secondary_ip,
1443 a8083063 Iustin Pop
      }
1444 a8083063 Iustin Pop
    nodes_0 = self.cfg.GetNodeList()
1445 a8083063 Iustin Pop
    nodes_1 = nodes_0 + [self.op.node_name, ]
1446 a8083063 Iustin Pop
    return env, nodes_0, nodes_1
1447 a8083063 Iustin Pop
1448 a8083063 Iustin Pop
  def CheckPrereq(self):
1449 a8083063 Iustin Pop
    """Check prerequisites.
1450 a8083063 Iustin Pop

1451 a8083063 Iustin Pop
    This checks:
1452 a8083063 Iustin Pop
     - the new node is not already in the config
1453 a8083063 Iustin Pop
     - it is resolvable
1454 a8083063 Iustin Pop
     - its parameters (single/dual homed) matches the cluster
1455 a8083063 Iustin Pop

1456 a8083063 Iustin Pop
    Any errors are signalled by raising errors.OpPrereqError.
1457 a8083063 Iustin Pop

1458 a8083063 Iustin Pop
    """
1459 a8083063 Iustin Pop
    node_name = self.op.node_name
1460 a8083063 Iustin Pop
    cfg = self.cfg
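    # resolve the given name; utils.HostInfo raises itself if the name
    # cannot be resolved, so no extra check is needed here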
1461 a8083063 Iustin Pop
1462 89e1fc26 Iustin Pop
    dns_data = utils.HostInfo(node_name)
1463 a8083063 Iustin Pop
1464 bcf043c9 Iustin Pop
    node = dns_data.name
1465 bcf043c9 Iustin Pop
    primary_ip = self.op.primary_ip = dns_data.ip
1466 a8083063 Iustin Pop
    secondary_ip = getattr(self.op, "secondary_ip", None)
1467 a8083063 Iustin Pop
    if secondary_ip is None:
1468 a8083063 Iustin Pop
      secondary_ip = primary_ip
1469 a8083063 Iustin Pop
    if not utils.IsValidIP(secondary_ip):
1470 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("Invalid secondary IP given")
1471 a8083063 Iustin Pop
    self.op.secondary_ip = secondary_ip
1472 e7c6e02b Michael Hanselmann
1473 a8083063 Iustin Pop
    node_list = cfg.GetNodeList()
1474 e7c6e02b Michael Hanselmann
    if not self.op.readd and node in node_list:
1475 e7c6e02b Michael Hanselmann
      raise errors.OpPrereqError("Node %s is already in the configuration" %
1476 e7c6e02b Michael Hanselmann
                                 node)
1477 e7c6e02b Michael Hanselmann
    elif self.op.readd and node not in node_list:
1478 e7c6e02b Michael Hanselmann
      raise errors.OpPrereqError("Node %s is not in the configuration" % node)
1479 a8083063 Iustin Pop
1480 a8083063 Iustin Pop
    for existing_node_name in node_list:
1481 a8083063 Iustin Pop
      existing_node = cfg.GetNodeInfo(existing_node_name)
1482 e7c6e02b Michael Hanselmann
1483 e7c6e02b Michael Hanselmann
      if self.op.readd and node == existing_node_name:
1484 e7c6e02b Michael Hanselmann
        if (existing_node.primary_ip != primary_ip or
1485 e7c6e02b Michael Hanselmann
            existing_node.secondary_ip != secondary_ip):
1486 e7c6e02b Michael Hanselmann
          raise errors.OpPrereqError("Readded node doesn't have the same IP"
1487 e7c6e02b Michael Hanselmann
                                     " address configuration as before")
1488 e7c6e02b Michael Hanselmann
        continue
1489 e7c6e02b Michael Hanselmann
1490 a8083063 Iustin Pop
      if (existing_node.primary_ip == primary_ip or
1491 a8083063 Iustin Pop
          existing_node.secondary_ip == primary_ip or
1492 a8083063 Iustin Pop
          existing_node.primary_ip == secondary_ip or
1493 a8083063 Iustin Pop
          existing_node.secondary_ip == secondary_ip):
1494 3ecf6786 Iustin Pop
        raise errors.OpPrereqError("New node ip address(es) conflict with"
1495 3ecf6786 Iustin Pop
                                   " existing node %s" % existing_node.name)
1496 a8083063 Iustin Pop
1497 a8083063 Iustin Pop
    # check that the type of the node (single versus dual homed) is the
1498 a8083063 Iustin Pop
    # same as for the master
1499 880478f8 Iustin Pop
    myself = cfg.GetNodeInfo(self.sstore.GetMasterNode())
1500 a8083063 Iustin Pop
    master_singlehomed = myself.secondary_ip == myself.primary_ip
1501 a8083063 Iustin Pop
    newbie_singlehomed = secondary_ip == primary_ip
1502 a8083063 Iustin Pop
    if master_singlehomed != newbie_singlehomed:
1503 a8083063 Iustin Pop
      if master_singlehomed:
1504 3ecf6786 Iustin Pop
        raise errors.OpPrereqError("The master has no private ip but the"
1505 3ecf6786 Iustin Pop
                                   " new node has one")
1506 a8083063 Iustin Pop
      else:
1507 3ecf6786 Iustin Pop
        raise errors.OpPrereqError("The master has a private ip but the"
1508 3ecf6786 Iustin Pop
                                   " new node doesn't have one")
1509 a8083063 Iustin Pop
1510 a8083063 Iustin Pop
    # checks reachability
1511 b15d625f Iustin Pop
    if not utils.TcpPing(primary_ip, constants.DEFAULT_NODED_PORT):
1512 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("Node not reachable by ping")
1513 a8083063 Iustin Pop
1514 a8083063 Iustin Pop
    if not newbie_singlehomed:
1515 a8083063 Iustin Pop
      # check reachability from my secondary ip to newbie's secondary ip
1516 b15d625f Iustin Pop
      if not utils.TcpPing(secondary_ip, constants.DEFAULT_NODED_PORT,
1517 b15d625f Iustin Pop
                           source=myself.secondary_ip):
1518 f4bc1f2c Michael Hanselmann
        raise errors.OpPrereqError("Node secondary ip not reachable by TCP"
1519 f4bc1f2c Michael Hanselmann
                                   " based ping to noded port")
1520 a8083063 Iustin Pop
1521 a8083063 Iustin Pop
    self.new_node = objects.Node(name=node,
1522 a8083063 Iustin Pop
                                 primary_ip=primary_ip,
1523 a8083063 Iustin Pop
                                 secondary_ip=secondary_ip)
1524 a8083063 Iustin Pop
1525 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
1526 a8083063 Iustin Pop
    """Adds the new node to the cluster.
1527 a8083063 Iustin Pop

1528 a8083063 Iustin Pop
    """
1529 a8083063 Iustin Pop
    new_node = self.new_node
1530 a8083063 Iustin Pop
    node = new_node.name
1531 a8083063 Iustin Pop
1532 a8083063 Iustin Pop
    # set up inter-node password and certificate and restart the node daemon
1533 a8083063 Iustin Pop
    gntpass = self.sstore.GetNodeDaemonPassword()
1534 a8083063 Iustin Pop
    if not re.match('^[a-zA-Z0-9.]{1,64}$', gntpass):
1535 3ecf6786 Iustin Pop
      raise errors.OpExecError("ganeti password corruption detected")
1536 a8083063 Iustin Pop
    f = open(constants.SSL_CERT_FILE)
1537 a8083063 Iustin Pop
    try:
1538 a8083063 Iustin Pop
      gntpem = f.read(8192)
1539 a8083063 Iustin Pop
    finally:
1540 a8083063 Iustin Pop
      f.close()
1541 a8083063 Iustin Pop
    # in the base64 pem encoding, neither '!' nor '.' are valid chars,
1542 a8083063 Iustin Pop
    # so we use this to detect an invalid certificate; as long as the
1543 a8083063 Iustin Pop
    # cert doesn't contain this, the here-document will be correctly
1544 a8083063 Iustin Pop
    # parsed by the shell sequence below
1545 a8083063 Iustin Pop
    if re.search('^!EOF\.', gntpem, re.MULTILINE):
1546 3ecf6786 Iustin Pop
      raise errors.OpExecError("invalid PEM encoding in the SSL certificate")
1547 a8083063 Iustin Pop
    if not gntpem.endswith("\n"):
1548 3ecf6786 Iustin Pop
      raise errors.OpExecError("PEM must end with newline")
1549 a8083063 Iustin Pop
    logger.Info("copy cluster pass to %s and starting the node daemon" % node)
1550 a8083063 Iustin Pop
1551 a8083063 Iustin Pop
    # and then connect with ssh to set password and start ganeti-noded
1552 a8083063 Iustin Pop
    # note that all the below variables are sanitized at this point,
1553 a8083063 Iustin Pop
    # either by being constants or by the checks above
1554 a8083063 Iustin Pop
    ss = self.sstore
1555 a8083063 Iustin Pop
    mycommand = ("umask 077 && "
1556 a8083063 Iustin Pop
                 "echo '%s' > '%s' && "
1557 a8083063 Iustin Pop
                 "cat > '%s' << '!EOF.' && \n"
1558 a8083063 Iustin Pop
                 "%s!EOF.\n%s restart" %
1559 a8083063 Iustin Pop
                 (gntpass, ss.KeyToFilename(ss.SS_NODED_PASS),
1560 a8083063 Iustin Pop
                  constants.SSL_CERT_FILE, gntpem,
1561 a8083063 Iustin Pop
                  constants.NODE_INITD_SCRIPT))
1562 a8083063 Iustin Pop
1563 c92b310a Michael Hanselmann
    result = self.ssh.Run(node, 'root', mycommand, batch=False, ask_key=True)
1564 a8083063 Iustin Pop
    if result.failed:
1565 3ecf6786 Iustin Pop
      raise errors.OpExecError("Remote command on node %s, error: %s,"
1566 3ecf6786 Iustin Pop
                               " output: %s" %
1567 3ecf6786 Iustin Pop
                               (node, result.fail_reason, result.output))
1568 a8083063 Iustin Pop
1569 a8083063 Iustin Pop
    # check connectivity
1570 a8083063 Iustin Pop
    time.sleep(4)
1571 a8083063 Iustin Pop
1572 a8083063 Iustin Pop
    result = rpc.call_version([node])[node]
1573 a8083063 Iustin Pop
    if result:
1574 a8083063 Iustin Pop
      if constants.PROTOCOL_VERSION == result:
1575 a8083063 Iustin Pop
        logger.Info("communication to node %s fine, sw version %s match" %
1576 a8083063 Iustin Pop
                    (node, result))
1577 a8083063 Iustin Pop
      else:
1578 3ecf6786 Iustin Pop
        raise errors.OpExecError("Version mismatch master version %s,"
1579 3ecf6786 Iustin Pop
                                 " node version %s" %
1580 3ecf6786 Iustin Pop
                                 (constants.PROTOCOL_VERSION, result))
1581 a8083063 Iustin Pop
    else:
1582 3ecf6786 Iustin Pop
      raise errors.OpExecError("Cannot get version from the new node")
1583 a8083063 Iustin Pop
1584 a8083063 Iustin Pop
    # setup ssh on node
1585 a8083063 Iustin Pop
    logger.Info("copy ssh key to node %s" % node)
1586 70d9e3d8 Iustin Pop
    priv_key, pub_key, _ = ssh.GetUserFiles(constants.GANETI_RUNAS)
1587 a8083063 Iustin Pop
    keyarray = []
1588 70d9e3d8 Iustin Pop
    keyfiles = [constants.SSH_HOST_DSA_PRIV, constants.SSH_HOST_DSA_PUB,
1589 70d9e3d8 Iustin Pop
                constants.SSH_HOST_RSA_PRIV, constants.SSH_HOST_RSA_PUB,
1590 70d9e3d8 Iustin Pop
                priv_key, pub_key]
1591 a8083063 Iustin Pop
1592 a8083063 Iustin Pop
    for i in keyfiles:
1593 a8083063 Iustin Pop
      f = open(i, 'r')
1594 a8083063 Iustin Pop
      try:
1595 a8083063 Iustin Pop
        keyarray.append(f.read())
1596 a8083063 Iustin Pop
      finally:
1597 a8083063 Iustin Pop
        f.close()
1598 a8083063 Iustin Pop
1599 a8083063 Iustin Pop
    result = rpc.call_node_add(node, keyarray[0], keyarray[1], keyarray[2],
1600 a8083063 Iustin Pop
                               keyarray[3], keyarray[4], keyarray[5])
1601 a8083063 Iustin Pop
1602 a8083063 Iustin Pop
    if not result:
1603 3ecf6786 Iustin Pop
      raise errors.OpExecError("Cannot transfer ssh keys to the new node")
1604 a8083063 Iustin Pop
1605 a8083063 Iustin Pop
    # Add node to our /etc/hosts, and add key to known_hosts
1606 d9c02ca6 Michael Hanselmann
    utils.AddHostToEtcHosts(new_node.name)
1607 c8a0948f Michael Hanselmann
1608 a8083063 Iustin Pop
    if new_node.secondary_ip != new_node.primary_ip:
1609 16abfbc2 Alexander Schreiber
      if not rpc.call_node_tcp_ping(new_node.name,
1610 16abfbc2 Alexander Schreiber
                                    constants.LOCALHOST_IP_ADDRESS,
1611 16abfbc2 Alexander Schreiber
                                    new_node.secondary_ip,
1612 16abfbc2 Alexander Schreiber
                                    constants.DEFAULT_NODED_PORT,
1613 16abfbc2 Alexander Schreiber
                                    10, False):
1614 f4bc1f2c Michael Hanselmann
        raise errors.OpExecError("Node claims it doesn't have the secondary ip"
1615 f4bc1f2c Michael Hanselmann
                                 " you gave (%s). Please fix and re-run this"
1616 f4bc1f2c Michael Hanselmann
                                 " command." % new_node.secondary_ip)
1617 a8083063 Iustin Pop
1618 c92b310a Michael Hanselmann
    success, msg = self.ssh.VerifyNodeHostname(node)
1619 ff98055b Iustin Pop
    if not success:
1620 ff98055b Iustin Pop
      raise errors.OpExecError("Node '%s' claims it has a different hostname"
1621 f4bc1f2c Michael Hanselmann
                               " than the one the resolver gives: %s."
1622 f4bc1f2c Michael Hanselmann
                               " Please fix and re-run this command." %
1623 ff98055b Iustin Pop
                               (node, msg))
1624 ff98055b Iustin Pop
1625 a8083063 Iustin Pop
    # Distribute updated /etc/hosts and known_hosts to all nodes,
1626 a8083063 Iustin Pop
    # including the node just added
1627 880478f8 Iustin Pop
    myself = self.cfg.GetNodeInfo(self.sstore.GetMasterNode())
1628 102b115b Michael Hanselmann
    dist_nodes = self.cfg.GetNodeList()
1629 102b115b Michael Hanselmann
    if not self.op.readd:
1630 102b115b Michael Hanselmann
      dist_nodes.append(node)
1631 a8083063 Iustin Pop
    if myself.name in dist_nodes:
1632 a8083063 Iustin Pop
      dist_nodes.remove(myself.name)
1633 a8083063 Iustin Pop
1634 a8083063 Iustin Pop
    logger.Debug("Copying hosts and known_hosts to all nodes")
1635 107711b0 Michael Hanselmann
    for fname in (constants.ETC_HOSTS, constants.SSH_KNOWN_HOSTS_FILE):
1636 a8083063 Iustin Pop
      result = rpc.call_upload_file(dist_nodes, fname)
1637 a8083063 Iustin Pop
      for to_node in dist_nodes:
1638 a8083063 Iustin Pop
        if not result[to_node]:
1639 a8083063 Iustin Pop
          logger.Error("copy of file %s to node %s failed" %
1640 a8083063 Iustin Pop
                       (fname, to_node))
1641 a8083063 Iustin Pop
1642 3d1e7706 Guido Trotter
    to_copy = self.sstore.GetFileList()
1643 2a6469d5 Alexander Schreiber
    if self.sstore.GetHypervisorType() == constants.HT_XEN_HVM31:
1644 2a6469d5 Alexander Schreiber
      to_copy.append(constants.VNC_PASSWORD_FILE)
1645 a8083063 Iustin Pop
    for fname in to_copy:
1646 b5602d15 Guido Trotter
      result = rpc.call_upload_file([node], fname)
1647 b5602d15 Guido Trotter
      if not result[node]:
1648 a8083063 Iustin Pop
        logger.Error("could not copy file %s to node %s" % (fname, node))
1649 a8083063 Iustin Pop
1650 e7c6e02b Michael Hanselmann
    if not self.op.readd:
1651 e7c6e02b Michael Hanselmann
      logger.Info("adding node %s to cluster.conf" % node)
1652 e7c6e02b Michael Hanselmann
      self.cfg.AddNode(new_node)
1653 a8083063 Iustin Pop
1654 a8083063 Iustin Pop
1655 a8083063 Iustin Pop
class LUMasterFailover(LogicalUnit):
1656 a8083063 Iustin Pop
  """Failover the master node to the current node.
1657 a8083063 Iustin Pop

1658 a8083063 Iustin Pop
  This is a special LU in that it must run on a non-master node.
1659 a8083063 Iustin Pop

1660 a8083063 Iustin Pop
  """
1661 a8083063 Iustin Pop
  HPATH = "master-failover"
1662 a8083063 Iustin Pop
  HTYPE = constants.HTYPE_CLUSTER
1663 a8083063 Iustin Pop
  REQ_MASTER = False
1664 05f86716 Guido Trotter
  REQ_WSSTORE = True
1665 a8083063 Iustin Pop
  _OP_REQP = []
1666 a8083063 Iustin Pop
1667 a8083063 Iustin Pop
  def BuildHooksEnv(self):
1668 a8083063 Iustin Pop
    """Build hooks env.
1669 a8083063 Iustin Pop

1670 a8083063 Iustin Pop
    This will run on the new master only in the pre phase, and on all
1671 a8083063 Iustin Pop
    the nodes in the post phase.
1672 a8083063 Iustin Pop

1673 a8083063 Iustin Pop
    """
1674 a8083063 Iustin Pop
    env = {
1675 0e137c28 Iustin Pop
      "OP_TARGET": self.new_master,
1676 a8083063 Iustin Pop
      "NEW_MASTER": self.new_master,
1677 a8083063 Iustin Pop
      "OLD_MASTER": self.old_master,
1678 a8083063 Iustin Pop
      }
1679 a8083063 Iustin Pop
    return env, [self.new_master], self.cfg.GetNodeList()
1680 a8083063 Iustin Pop
1681 a8083063 Iustin Pop
  def CheckPrereq(self):
1682 a8083063 Iustin Pop
    """Check prerequisites.
1683 a8083063 Iustin Pop

1684 a8083063 Iustin Pop
    This checks that we are not already the master.
1685 a8083063 Iustin Pop

1686 a8083063 Iustin Pop
    """
1687 89e1fc26 Iustin Pop
    self.new_master = utils.HostInfo().name
1688 880478f8 Iustin Pop
    self.old_master = self.sstore.GetMasterNode()
1689 a8083063 Iustin Pop
1690 a8083063 Iustin Pop
    if self.old_master == self.new_master:
1691 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("This command must be run on the node"
1692 f4bc1f2c Michael Hanselmann
                                 " where you want the new master to be."
1693 f4bc1f2c Michael Hanselmann
                                 " %s is already the master" %
1694 3ecf6786 Iustin Pop
                                 self.old_master)
1695 a8083063 Iustin Pop
1696 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
1697 a8083063 Iustin Pop
    """Failover the master node.
1698 a8083063 Iustin Pop

1699 a8083063 Iustin Pop
    This command, when run on a non-master node, will cause the current
1700 a8083063 Iustin Pop
    master to cease being master, and the non-master to become new
1701 a8083063 Iustin Pop
    master.
1702 a8083063 Iustin Pop

1703 a8083063 Iustin Pop
    """
1704 a8083063 Iustin Pop
    #TODO: do not rely on gethostname returning the FQDN
1705 a8083063 Iustin Pop
    logger.Info("setting master to %s, old master: %s" %
1706 a8083063 Iustin Pop
                (self.new_master, self.old_master))
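    # the order matters here: stop the master role on the old master,
    # record and distribute the new master name via the simple store, and
    # only then start the master role on the new master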
1707 a8083063 Iustin Pop
1708 a8083063 Iustin Pop
    if not rpc.call_node_stop_master(self.old_master):
1709 a8083063 Iustin Pop
      logger.Error("could not disable the master role on the old master"
1710 a8083063 Iustin Pop
                   " %s, please disable manually" % self.old_master)
1711 a8083063 Iustin Pop
1712 880478f8 Iustin Pop
    ss = self.sstore
1713 880478f8 Iustin Pop
    ss.SetKey(ss.SS_MASTER_NODE, self.new_master)
1714 880478f8 Iustin Pop
    if not rpc.call_upload_file(self.cfg.GetNodeList(),
1715 880478f8 Iustin Pop
                                ss.KeyToFilename(ss.SS_MASTER_NODE)):
1716 880478f8 Iustin Pop
      logger.Error("could not distribute the new simple store master file"
1717 880478f8 Iustin Pop
                   " to the other nodes, please check.")
1718 880478f8 Iustin Pop
1719 a8083063 Iustin Pop
    if not rpc.call_node_start_master(self.new_master):
1720 a8083063 Iustin Pop
      logger.Error("could not start the master role on the new master"
1721 a8083063 Iustin Pop
                   " %s, please check" % self.new_master)
1722 f4bc1f2c Michael Hanselmann
      feedback_fn("Error in activating the master IP on the new master,"
1723 f4bc1f2c Michael Hanselmann
                  " please fix manually.")
1724 a8083063 Iustin Pop
1725 a8083063 Iustin Pop
1726 a8083063 Iustin Pop
1727 a8083063 Iustin Pop
class LUQueryClusterInfo(NoHooksLU):
1728 a8083063 Iustin Pop
  """Query cluster configuration.
1729 a8083063 Iustin Pop

1730 a8083063 Iustin Pop
  """
1731 a8083063 Iustin Pop
  _OP_REQP = []
1732 59322403 Iustin Pop
  REQ_MASTER = False
1733 a8083063 Iustin Pop
1734 a8083063 Iustin Pop
  def CheckPrereq(self):
1735 a8083063 Iustin Pop
    """No prerequsites needed for this LU.
1736 a8083063 Iustin Pop

1737 a8083063 Iustin Pop
    """
1738 a8083063 Iustin Pop
    pass
1739 a8083063 Iustin Pop
1740 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
1741 a8083063 Iustin Pop
    """Return cluster config.
1742 a8083063 Iustin Pop

1743 a8083063 Iustin Pop
    """
1744 a8083063 Iustin Pop
    result = {
1745 5fcdc80d Iustin Pop
      "name": self.sstore.GetClusterName(),
1746 a8083063 Iustin Pop
      "software_version": constants.RELEASE_VERSION,
1747 a8083063 Iustin Pop
      "protocol_version": constants.PROTOCOL_VERSION,
1748 a8083063 Iustin Pop
      "config_version": constants.CONFIG_VERSION,
1749 a8083063 Iustin Pop
      "os_api_version": constants.OS_API_VERSION,
1750 a8083063 Iustin Pop
      "export_version": constants.EXPORT_VERSION,
1751 880478f8 Iustin Pop
      "master": self.sstore.GetMasterNode(),
1752 a8083063 Iustin Pop
      "architecture": (platform.architecture()[0], platform.machine()),
1753 8a12ce45 Iustin Pop
      "hypervisor_type": self.sstore.GetHypervisorType(),
1754 a8083063 Iustin Pop
      }
1755 a8083063 Iustin Pop
1756 a8083063 Iustin Pop
    return result
1757 a8083063 Iustin Pop
1758 a8083063 Iustin Pop
1759 a8083063 Iustin Pop
class LUDumpClusterConfig(NoHooksLU):
1760 a8083063 Iustin Pop
  """Return a text-representation of the cluster-config.
1761 a8083063 Iustin Pop

1762 a8083063 Iustin Pop
  """
1763 a8083063 Iustin Pop
  _OP_REQP = []
1764 a8083063 Iustin Pop
1765 a8083063 Iustin Pop
  def CheckPrereq(self):
1766 a8083063 Iustin Pop
    """No prerequisites.
1767 a8083063 Iustin Pop

1768 a8083063 Iustin Pop
    """
1769 a8083063 Iustin Pop
    pass
1770 a8083063 Iustin Pop
1771 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
1772 a8083063 Iustin Pop
    """Dump a representation of the cluster config to the standard output.
1773 a8083063 Iustin Pop

1774 a8083063 Iustin Pop
    """
1775 a8083063 Iustin Pop
    return self.cfg.DumpConfig()
1776 a8083063 Iustin Pop
1777 a8083063 Iustin Pop
1778 a8083063 Iustin Pop
class LUActivateInstanceDisks(NoHooksLU):
1779 a8083063 Iustin Pop
  """Bring up an instance's disks.
1780 a8083063 Iustin Pop

1781 a8083063 Iustin Pop
  """
1782 a8083063 Iustin Pop
  _OP_REQP = ["instance_name"]
1783 a8083063 Iustin Pop
1784 a8083063 Iustin Pop
  def CheckPrereq(self):
1785 a8083063 Iustin Pop
    """Check prerequisites.
1786 a8083063 Iustin Pop

1787 a8083063 Iustin Pop
    This checks that the instance is in the cluster.
1788 a8083063 Iustin Pop

1789 a8083063 Iustin Pop
    """
1790 a8083063 Iustin Pop
    instance = self.cfg.GetInstanceInfo(
1791 a8083063 Iustin Pop
      self.cfg.ExpandInstanceName(self.op.instance_name))
1792 a8083063 Iustin Pop
    if instance is None:
1793 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("Instance '%s' not known" %
1794 3ecf6786 Iustin Pop
                                 self.op.instance_name)
1795 a8083063 Iustin Pop
    self.instance = instance
1796 a8083063 Iustin Pop
1797 a8083063 Iustin Pop
1798 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
1799 a8083063 Iustin Pop
    """Activate the disks.
1800 a8083063 Iustin Pop

1801 a8083063 Iustin Pop
    """
1802 a8083063 Iustin Pop
    disks_ok, disks_info = _AssembleInstanceDisks(self.instance, self.cfg)
1803 a8083063 Iustin Pop
    if not disks_ok:
1804 3ecf6786 Iustin Pop
      raise errors.OpExecError("Cannot activate block devices")
1805 a8083063 Iustin Pop
1806 a8083063 Iustin Pop
    return disks_info
1807 a8083063 Iustin Pop
1808 a8083063 Iustin Pop
1809 a8083063 Iustin Pop
def _AssembleInstanceDisks(instance, cfg, ignore_secondaries=False):
1810 a8083063 Iustin Pop
  """Prepare the block devices for an instance.
1811 a8083063 Iustin Pop

1812 a8083063 Iustin Pop
  This sets up the block devices on all nodes.
1813 a8083063 Iustin Pop

1814 a8083063 Iustin Pop
  Args:
1815 a8083063 Iustin Pop
    instance: a ganeti.objects.Instance object
1816 a8083063 Iustin Pop
    ignore_secondaries: if true, errors on secondary nodes won't result
1817 a8083063 Iustin Pop
                        in an error return from the function
1818 a8083063 Iustin Pop

1819 a8083063 Iustin Pop
  Returns:
1820 a8083063 Iustin Pop
    false if the operation failed
1821 a8083063 Iustin Pop
    list of (host, instance_visible_name, node_visible_name) if the operation
1822 a8083063 Iustin Pop
         succeeded with the mapping from node devices to instance devices
1823 a8083063 Iustin Pop
  """
1824 a8083063 Iustin Pop
  device_info = []
1825 a8083063 Iustin Pop
  disks_ok = True
1826 fdbd668d Iustin Pop
  iname = instance.name
1827 fdbd668d Iustin Pop
  # With the two passes mechanism we try to reduce the window of
1828 fdbd668d Iustin Pop
  # opportunity for the race condition of switching DRBD to primary
1829 fdbd668d Iustin Pop
  # before handshaking occurred, but we do not eliminate it
1830 fdbd668d Iustin Pop
1831 fdbd668d Iustin Pop
  # The proper fix would be to wait (with some limits) until the
1832 fdbd668d Iustin Pop
  # connection has been made and drbd transitions from WFConnection
1833 fdbd668d Iustin Pop
  # into any other network-connected state (Connected, SyncTarget,
1834 fdbd668d Iustin Pop
  # SyncSource, etc.)
1835 fdbd668d Iustin Pop
1836 fdbd668d Iustin Pop
  # 1st pass, assemble on all nodes in secondary mode
1837 a8083063 Iustin Pop
  for inst_disk in instance.disks:
1838 a8083063 Iustin Pop
    for node, node_disk in inst_disk.ComputeNodeTree(instance.primary_node):
1839 a8083063 Iustin Pop
      cfg.SetDiskID(node_disk, node)
1840 fdbd668d Iustin Pop
      result = rpc.call_blockdev_assemble(node, node_disk, iname, False)
1841 a8083063 Iustin Pop
      if not result:
1842 f4bc1f2c Michael Hanselmann
        logger.Error("could not prepare block device %s on node %s"
1843 fdbd668d Iustin Pop
                     " (is_primary=False, pass=1)" % (inst_disk.iv_name, node))
1844 fdbd668d Iustin Pop
        if not ignore_secondaries:
1845 a8083063 Iustin Pop
          disks_ok = False
1846 fdbd668d Iustin Pop
1847 fdbd668d Iustin Pop
  # FIXME: race condition on drbd migration to primary
1848 fdbd668d Iustin Pop
1849 fdbd668d Iustin Pop
  # 2nd pass, do only the primary node
1850 fdbd668d Iustin Pop
  for inst_disk in instance.disks:
1851 fdbd668d Iustin Pop
    for node, node_disk in inst_disk.ComputeNodeTree(instance.primary_node):
1852 fdbd668d Iustin Pop
      if node != instance.primary_node:
1853 fdbd668d Iustin Pop
        continue
1854 fdbd668d Iustin Pop
      cfg.SetDiskID(node_disk, node)
1855 fdbd668d Iustin Pop
      result = rpc.call_blockdev_assemble(node, node_disk, iname, True)
1856 fdbd668d Iustin Pop
      if not result:
1857 fdbd668d Iustin Pop
        logger.Error("could not prepare block device %s on node %s"
1858 fdbd668d Iustin Pop
                     " (is_primary=True, pass=2)" % (inst_disk.iv_name, node))
1859 fdbd668d Iustin Pop
        disks_ok = False
1860 fdbd668d Iustin Pop
    device_info.append((instance.primary_node, inst_disk.iv_name, result))
1861 a8083063 Iustin Pop
1862 b352ab5b Iustin Pop
  # leave the disks configured for the primary node
1863 b352ab5b Iustin Pop
  # this is a workaround that would be fixed better by
1864 b352ab5b Iustin Pop
  # improving the logical/physical id handling
1865 b352ab5b Iustin Pop
  for disk in instance.disks:
1866 b352ab5b Iustin Pop
    cfg.SetDiskID(disk, instance.primary_node)
1867 b352ab5b Iustin Pop
1868 a8083063 Iustin Pop
  return disks_ok, device_info
1869 a8083063 Iustin Pop
1870 a8083063 Iustin Pop
1871 fe7b0351 Michael Hanselmann
def _StartInstanceDisks(cfg, instance, force):
1872 3ecf6786 Iustin Pop
  """Start the disks of an instance.
1873 3ecf6786 Iustin Pop

1874 3ecf6786 Iustin Pop
  """
1875 fe7b0351 Michael Hanselmann
  disks_ok, dummy = _AssembleInstanceDisks(instance, cfg,
1876 fe7b0351 Michael Hanselmann
                                           ignore_secondaries=force)
1877 fe7b0351 Michael Hanselmann
  if not disks_ok:
1878 fe7b0351 Michael Hanselmann
    _ShutdownInstanceDisks(instance, cfg)
1879 fe7b0351 Michael Hanselmann
    if force is not None and not force:
1880 fe7b0351 Michael Hanselmann
      logger.Error("If the message above refers to a secondary node,"
1881 fe7b0351 Michael Hanselmann
                   " you can retry the operation using '--force'.")
1882 3ecf6786 Iustin Pop
    raise errors.OpExecError("Disk consistency error")
1883 fe7b0351 Michael Hanselmann
1884 fe7b0351 Michael Hanselmann
1885 a8083063 Iustin Pop
class LUDeactivateInstanceDisks(NoHooksLU):
1886 a8083063 Iustin Pop
  """Shutdown an instance's disks.
1887 a8083063 Iustin Pop

1888 a8083063 Iustin Pop
  """
1889 a8083063 Iustin Pop
  _OP_REQP = ["instance_name"]
1890 a8083063 Iustin Pop
1891 a8083063 Iustin Pop
  def CheckPrereq(self):
1892 a8083063 Iustin Pop
    """Check prerequisites.
1893 a8083063 Iustin Pop

1894 a8083063 Iustin Pop
    This checks that the instance is in the cluster.
1895 a8083063 Iustin Pop

1896 a8083063 Iustin Pop
    """
1897 a8083063 Iustin Pop
    instance = self.cfg.GetInstanceInfo(
1898 a8083063 Iustin Pop
      self.cfg.ExpandInstanceName(self.op.instance_name))
1899 a8083063 Iustin Pop
    if instance is None:
1900 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("Instance '%s' not known" %
1901 3ecf6786 Iustin Pop
                                 self.op.instance_name)
1902 a8083063 Iustin Pop
    self.instance = instance
1903 a8083063 Iustin Pop
1904 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
1905 a8083063 Iustin Pop
    """Deactivate the disks
1906 a8083063 Iustin Pop

1907 a8083063 Iustin Pop
    """
1908 a8083063 Iustin Pop
    instance = self.instance
1909 a8083063 Iustin Pop
    ins_l = rpc.call_instance_list([instance.primary_node])
1910 a8083063 Iustin Pop
    ins_l = ins_l[instance.primary_node]
1911 a8083063 Iustin Pop
    if not isinstance(ins_l, list):
1912 3ecf6786 Iustin Pop
      raise errors.OpExecError("Can't contact node '%s'" %
1913 3ecf6786 Iustin Pop
                               instance.primary_node)
1914 a8083063 Iustin Pop
1915 a8083063 Iustin Pop
    if self.instance.name in ins_l:
1916 3ecf6786 Iustin Pop
      raise errors.OpExecError("Instance is running, can't shutdown"
1917 3ecf6786 Iustin Pop
                               " block devices.")
1918 a8083063 Iustin Pop
1919 a8083063 Iustin Pop
    _ShutdownInstanceDisks(instance, self.cfg)
1920 a8083063 Iustin Pop
1921 a8083063 Iustin Pop
1922 a8083063 Iustin Pop
def _ShutdownInstanceDisks(instance, cfg, ignore_primary=False):
1923 a8083063 Iustin Pop
  """Shutdown block devices of an instance.
1924 a8083063 Iustin Pop

1925 a8083063 Iustin Pop
  This does the shutdown on all nodes of the instance.
1926 a8083063 Iustin Pop

1927 a8083063 Iustin Pop
  If ignore_primary is true, errors on the primary node are
1928 a8083063 Iustin Pop
  ignored; otherwise any shutdown error makes the function return False.
1929 a8083063 Iustin Pop

1930 a8083063 Iustin Pop
  """
1931 a8083063 Iustin Pop
  result = True
1932 a8083063 Iustin Pop
  for disk in instance.disks:
1933 a8083063 Iustin Pop
    for node, top_disk in disk.ComputeNodeTree(instance.primary_node):
1934 a8083063 Iustin Pop
      cfg.SetDiskID(top_disk, node)
1935 a8083063 Iustin Pop
      if not rpc.call_blockdev_shutdown(node, top_disk):
1936 a8083063 Iustin Pop
        logger.Error("could not shutdown block device %s on node %s" %
1937 a8083063 Iustin Pop
                     (disk.iv_name, node))
1938 a8083063 Iustin Pop
        if not ignore_primary or node != instance.primary_node:
1939 a8083063 Iustin Pop
          result = False
1940 a8083063 Iustin Pop
  return result
1941 a8083063 Iustin Pop
1942 a8083063 Iustin Pop
1943 d4f16fd9 Iustin Pop
def _CheckNodeFreeMemory(cfg, node, reason, requested):
1944 d4f16fd9 Iustin Pop
  """Checks if a node has enough free memory.
1945 d4f16fd9 Iustin Pop

1946 d4f16fd9 Iustin Pop
  This function checks if a given node has the needed amount of free
1947 d4f16fd9 Iustin Pop
  memory. In case the node has less memory or we cannot get the
1948 d4f16fd9 Iustin Pop
  information from the node, this function raise an OpPrereqError
1949 d4f16fd9 Iustin Pop
  exception.
1950 d4f16fd9 Iustin Pop

1951 d4f16fd9 Iustin Pop
  Args:
1952 d4f16fd9 Iustin Pop
    - cfg: a ConfigWriter instance
1953 d4f16fd9 Iustin Pop
    - node: the node name
1954 d4f16fd9 Iustin Pop
    - reason: string to use in the error message
1955 d4f16fd9 Iustin Pop
    - requested: the amount of memory in MiB
1956 d4f16fd9 Iustin Pop

1957 d4f16fd9 Iustin Pop
  """
1958 d4f16fd9 Iustin Pop
  nodeinfo = rpc.call_node_info([node], cfg.GetVGName())
1959 d4f16fd9 Iustin Pop
  if not nodeinfo or not isinstance(nodeinfo, dict):
1960 d4f16fd9 Iustin Pop
    raise errors.OpPrereqError("Could not contact node %s for resource"
1961 d4f16fd9 Iustin Pop
                             " information" % (node,))
1962 d4f16fd9 Iustin Pop
1963 d4f16fd9 Iustin Pop
  free_mem = nodeinfo[node].get('memory_free')
1964 d4f16fd9 Iustin Pop
  if not isinstance(free_mem, int):
1965 d4f16fd9 Iustin Pop
    raise errors.OpPrereqError("Can't compute free memory on node %s, result"
1966 d4f16fd9 Iustin Pop
                             " was '%s'" % (node, free_mem))
1967 d4f16fd9 Iustin Pop
  if requested > free_mem:
1968 d4f16fd9 Iustin Pop
    raise errors.OpPrereqError("Not enough memory on node %s for %s:"
1969 d4f16fd9 Iustin Pop
                             " needed %s MiB, available %s MiB" %
1970 d4f16fd9 Iustin Pop
                             (node, reason, requested, free_mem))
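# For reference, this helper is invoked below by LUStartupInstance and
# LUFailoverInstance, e.g.:
#
#   _CheckNodeFreeMemory(self.cfg, instance.primary_node,
#                        "starting instance %s" % instance.name,
#                        instance.memory)
#
# and raises OpPrereqError when the node reports less free memory than the
# requested amount (in MiB).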
1971 d4f16fd9 Iustin Pop
1972 d4f16fd9 Iustin Pop
1973 a8083063 Iustin Pop
class LUStartupInstance(LogicalUnit):
1974 a8083063 Iustin Pop
  """Starts an instance.
1975 a8083063 Iustin Pop

1976 a8083063 Iustin Pop
  """
1977 a8083063 Iustin Pop
  HPATH = "instance-start"
1978 a8083063 Iustin Pop
  HTYPE = constants.HTYPE_INSTANCE
1979 a8083063 Iustin Pop
  _OP_REQP = ["instance_name", "force"]
1980 a8083063 Iustin Pop
1981 a8083063 Iustin Pop
  def BuildHooksEnv(self):
1982 a8083063 Iustin Pop
    """Build hooks env.
1983 a8083063 Iustin Pop

1984 a8083063 Iustin Pop
    This runs on master, primary and secondary nodes of the instance.
1985 a8083063 Iustin Pop

1986 a8083063 Iustin Pop
    """
1987 a8083063 Iustin Pop
    env = {
1988 a8083063 Iustin Pop
      "FORCE": self.op.force,
1989 a8083063 Iustin Pop
      }
1990 396e1b78 Michael Hanselmann
    env.update(_BuildInstanceHookEnvByObject(self.instance))
1991 880478f8 Iustin Pop
    nl = ([self.sstore.GetMasterNode(), self.instance.primary_node] +
1992 a8083063 Iustin Pop
          list(self.instance.secondary_nodes))
1993 a8083063 Iustin Pop
    return env, nl, nl
1994 a8083063 Iustin Pop
1995 a8083063 Iustin Pop
  def CheckPrereq(self):
1996 a8083063 Iustin Pop
    """Check prerequisites.
1997 a8083063 Iustin Pop

1998 a8083063 Iustin Pop
    This checks that the instance is in the cluster.
1999 a8083063 Iustin Pop

2000 a8083063 Iustin Pop
    """
2001 a8083063 Iustin Pop
    instance = self.cfg.GetInstanceInfo(
2002 a8083063 Iustin Pop
      self.cfg.ExpandInstanceName(self.op.instance_name))
2003 a8083063 Iustin Pop
    if instance is None:
2004 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("Instance '%s' not known" %
2005 3ecf6786 Iustin Pop
                                 self.op.instance_name)
2006 a8083063 Iustin Pop
2007 a8083063 Iustin Pop
    # check bridges existence
2008 bf6929a2 Alexander Schreiber
    _CheckInstanceBridgesExist(instance)
2009 a8083063 Iustin Pop
2010 d4f16fd9 Iustin Pop
    _CheckNodeFreeMemory(self.cfg, instance.primary_node,
2011 d4f16fd9 Iustin Pop
                         "starting instance %s" % instance.name,
2012 d4f16fd9 Iustin Pop
                         instance.memory)
2013 d4f16fd9 Iustin Pop
2014 a8083063 Iustin Pop
    self.instance = instance
2015 a8083063 Iustin Pop
    self.op.instance_name = instance.name
2016 a8083063 Iustin Pop
2017 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
2018 a8083063 Iustin Pop
    """Start the instance.
2019 a8083063 Iustin Pop

2020 a8083063 Iustin Pop
    """
2021 a8083063 Iustin Pop
    instance = self.instance
2022 a8083063 Iustin Pop
    force = self.op.force
2023 a8083063 Iustin Pop
    extra_args = getattr(self.op, "extra_args", "")
2024 a8083063 Iustin Pop
2025 fe482621 Iustin Pop
    self.cfg.MarkInstanceUp(instance.name)
2026 fe482621 Iustin Pop
2027 a8083063 Iustin Pop
    node_current = instance.primary_node
2028 a8083063 Iustin Pop
2029 fe7b0351 Michael Hanselmann
    _StartInstanceDisks(self.cfg, instance, force)
2030 a8083063 Iustin Pop
2031 a8083063 Iustin Pop
    if not rpc.call_instance_start(node_current, instance, extra_args):
2032 a8083063 Iustin Pop
      _ShutdownInstanceDisks(instance, self.cfg)
2033 3ecf6786 Iustin Pop
      raise errors.OpExecError("Could not start instance")
2034 a8083063 Iustin Pop
2035 a8083063 Iustin Pop
2036 bf6929a2 Alexander Schreiber
class LURebootInstance(LogicalUnit):
2037 bf6929a2 Alexander Schreiber
  """Reboot an instance.
2038 bf6929a2 Alexander Schreiber

2039 bf6929a2 Alexander Schreiber
  """
2040 bf6929a2 Alexander Schreiber
  HPATH = "instance-reboot"
2041 bf6929a2 Alexander Schreiber
  HTYPE = constants.HTYPE_INSTANCE
2042 bf6929a2 Alexander Schreiber
  _OP_REQP = ["instance_name", "ignore_secondaries", "reboot_type"]
2043 bf6929a2 Alexander Schreiber
2044 bf6929a2 Alexander Schreiber
  def BuildHooksEnv(self):
2045 bf6929a2 Alexander Schreiber
    """Build hooks env.
2046 bf6929a2 Alexander Schreiber

2047 bf6929a2 Alexander Schreiber
    This runs on master, primary and secondary nodes of the instance.
2048 bf6929a2 Alexander Schreiber

2049 bf6929a2 Alexander Schreiber
    """
2050 bf6929a2 Alexander Schreiber
    env = {
2051 bf6929a2 Alexander Schreiber
      "IGNORE_SECONDARIES": self.op.ignore_secondaries,
2052 bf6929a2 Alexander Schreiber
      }
2053 bf6929a2 Alexander Schreiber
    env.update(_BuildInstanceHookEnvByObject(self.instance))
2054 bf6929a2 Alexander Schreiber
    nl = ([self.sstore.GetMasterNode(), self.instance.primary_node] +
2055 bf6929a2 Alexander Schreiber
          list(self.instance.secondary_nodes))
2056 bf6929a2 Alexander Schreiber
    return env, nl, nl
2057 bf6929a2 Alexander Schreiber
2058 bf6929a2 Alexander Schreiber
  def CheckPrereq(self):
2059 bf6929a2 Alexander Schreiber
    """Check prerequisites.
2060 bf6929a2 Alexander Schreiber

2061 bf6929a2 Alexander Schreiber
    This checks that the instance is in the cluster.
2062 bf6929a2 Alexander Schreiber

2063 bf6929a2 Alexander Schreiber
    """
2064 bf6929a2 Alexander Schreiber
    instance = self.cfg.GetInstanceInfo(
2065 bf6929a2 Alexander Schreiber
      self.cfg.ExpandInstanceName(self.op.instance_name))
2066 bf6929a2 Alexander Schreiber
    if instance is None:
2067 bf6929a2 Alexander Schreiber
      raise errors.OpPrereqError("Instance '%s' not known" %
2068 bf6929a2 Alexander Schreiber
                                 self.op.instance_name)
2069 bf6929a2 Alexander Schreiber
2070 bf6929a2 Alexander Schreiber
    # check bridges existence
2071 bf6929a2 Alexander Schreiber
    _CheckInstanceBridgesExist(instance)
2072 bf6929a2 Alexander Schreiber
2073 bf6929a2 Alexander Schreiber
    self.instance = instance
2074 bf6929a2 Alexander Schreiber
    self.op.instance_name = instance.name
2075 bf6929a2 Alexander Schreiber
2076 bf6929a2 Alexander Schreiber
  def Exec(self, feedback_fn):
2077 bf6929a2 Alexander Schreiber
    """Reboot the instance.
2078 bf6929a2 Alexander Schreiber

2079 bf6929a2 Alexander Schreiber
    """
2080 bf6929a2 Alexander Schreiber
    instance = self.instance
2081 bf6929a2 Alexander Schreiber
    ignore_secondaries = self.op.ignore_secondaries
2082 bf6929a2 Alexander Schreiber
    reboot_type = self.op.reboot_type
2083 bf6929a2 Alexander Schreiber
    extra_args = getattr(self.op, "extra_args", "")
2084 bf6929a2 Alexander Schreiber
2085 bf6929a2 Alexander Schreiber
    node_current = instance.primary_node
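    # Soft and hard reboots are delegated to the node daemon via
    # rpc.call_instance_reboot; a full reboot is emulated below by shutting
    # the instance down, cycling its block devices and starting it again.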
2086 bf6929a2 Alexander Schreiber
2087 bf6929a2 Alexander Schreiber
    if reboot_type not in [constants.INSTANCE_REBOOT_SOFT,
2088 bf6929a2 Alexander Schreiber
                           constants.INSTANCE_REBOOT_HARD,
2089 bf6929a2 Alexander Schreiber
                           constants.INSTANCE_REBOOT_FULL]:
2090 bf6929a2 Alexander Schreiber
      raise errors.ParameterError("reboot type not in [%s, %s, %s]" %
2091 bf6929a2 Alexander Schreiber
                                  (constants.INSTANCE_REBOOT_SOFT,
2092 bf6929a2 Alexander Schreiber
                                   constants.INSTANCE_REBOOT_HARD,
2093 bf6929a2 Alexander Schreiber
                                   constants.INSTANCE_REBOOT_FULL))
2094 bf6929a2 Alexander Schreiber
2095 bf6929a2 Alexander Schreiber
    if reboot_type in [constants.INSTANCE_REBOOT_SOFT,
2096 bf6929a2 Alexander Schreiber
                       constants.INSTANCE_REBOOT_HARD]:
2097 bf6929a2 Alexander Schreiber
      if not rpc.call_instance_reboot(node_current, instance,
2098 bf6929a2 Alexander Schreiber
                                      reboot_type, extra_args):
2099 bf6929a2 Alexander Schreiber
        raise errors.OpExecError("Could not reboot instance")
2100 bf6929a2 Alexander Schreiber
    else:
2101 bf6929a2 Alexander Schreiber
      if not rpc.call_instance_shutdown(node_current, instance):
2102 bf6929a2 Alexander Schreiber
        raise errors.OpExecError("could not shutdown instance for full reboot")
2103 bf6929a2 Alexander Schreiber
      _ShutdownInstanceDisks(instance, self.cfg)
2104 bf6929a2 Alexander Schreiber
      _StartInstanceDisks(self.cfg, instance, ignore_secondaries)
2105 bf6929a2 Alexander Schreiber
      if not rpc.call_instance_start(node_current, instance, extra_args):
2106 bf6929a2 Alexander Schreiber
        _ShutdownInstanceDisks(instance, self.cfg)
2107 bf6929a2 Alexander Schreiber
        raise errors.OpExecError("Could not start instance for full reboot")
2108 bf6929a2 Alexander Schreiber
2109 bf6929a2 Alexander Schreiber
    self.cfg.MarkInstanceUp(instance.name)
2110 bf6929a2 Alexander Schreiber
2111 bf6929a2 Alexander Schreiber
2112 a8083063 Iustin Pop
class LUShutdownInstance(LogicalUnit):
2113 a8083063 Iustin Pop
  """Shutdown an instance.
2114 a8083063 Iustin Pop

2115 a8083063 Iustin Pop
  """
2116 a8083063 Iustin Pop
  HPATH = "instance-stop"
2117 a8083063 Iustin Pop
  HTYPE = constants.HTYPE_INSTANCE
2118 a8083063 Iustin Pop
  _OP_REQP = ["instance_name"]
2119 a8083063 Iustin Pop
2120 a8083063 Iustin Pop
  def BuildHooksEnv(self):
2121 a8083063 Iustin Pop
    """Build hooks env.
2122 a8083063 Iustin Pop

2123 a8083063 Iustin Pop
    This runs on master, primary and secondary nodes of the instance.
2124 a8083063 Iustin Pop

2125 a8083063 Iustin Pop
    """
2126 396e1b78 Michael Hanselmann
    env = _BuildInstanceHookEnvByObject(self.instance)
2127 880478f8 Iustin Pop
    nl = ([self.sstore.GetMasterNode(), self.instance.primary_node] +
2128 a8083063 Iustin Pop
          list(self.instance.secondary_nodes))
2129 a8083063 Iustin Pop
    return env, nl, nl
2130 a8083063 Iustin Pop
2131 a8083063 Iustin Pop
  def CheckPrereq(self):
2132 a8083063 Iustin Pop
    """Check prerequisites.
2133 a8083063 Iustin Pop

2134 a8083063 Iustin Pop
    This checks that the instance is in the cluster.
2135 a8083063 Iustin Pop

2136 a8083063 Iustin Pop
    """
2137 a8083063 Iustin Pop
    instance = self.cfg.GetInstanceInfo(
2138 a8083063 Iustin Pop
      self.cfg.ExpandInstanceName(self.op.instance_name))
2139 a8083063 Iustin Pop
    if instance is None:
2140 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("Instance '%s' not known" %
2141 3ecf6786 Iustin Pop
                                 self.op.instance_name)
2142 a8083063 Iustin Pop
    self.instance = instance
2143 a8083063 Iustin Pop
2144 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
2145 a8083063 Iustin Pop
    """Shutdown the instance.
2146 a8083063 Iustin Pop

2147 a8083063 Iustin Pop
    """
2148 a8083063 Iustin Pop
    instance = self.instance
2149 a8083063 Iustin Pop
    node_current = instance.primary_node
2150 fe482621 Iustin Pop
    self.cfg.MarkInstanceDown(instance.name)
2151 a8083063 Iustin Pop
    if not rpc.call_instance_shutdown(node_current, instance):
2152 a8083063 Iustin Pop
      logger.Error("could not shutdown instance")
2153 a8083063 Iustin Pop
2154 a8083063 Iustin Pop
    _ShutdownInstanceDisks(instance, self.cfg)
2155 a8083063 Iustin Pop
2156 a8083063 Iustin Pop
2157 fe7b0351 Michael Hanselmann
class LUReinstallInstance(LogicalUnit):
2158 fe7b0351 Michael Hanselmann
  """Reinstall an instance.
2159 fe7b0351 Michael Hanselmann

2160 fe7b0351 Michael Hanselmann
  """
2161 fe7b0351 Michael Hanselmann
  HPATH = "instance-reinstall"
2162 fe7b0351 Michael Hanselmann
  HTYPE = constants.HTYPE_INSTANCE
2163 fe7b0351 Michael Hanselmann
  _OP_REQP = ["instance_name"]
2164 fe7b0351 Michael Hanselmann
2165 fe7b0351 Michael Hanselmann
  def BuildHooksEnv(self):
2166 fe7b0351 Michael Hanselmann
    """Build hooks env.
2167 fe7b0351 Michael Hanselmann

2168 fe7b0351 Michael Hanselmann
    This runs on master, primary and secondary nodes of the instance.
2169 fe7b0351 Michael Hanselmann

2170 fe7b0351 Michael Hanselmann
    """
2171 396e1b78 Michael Hanselmann
    env = _BuildInstanceHookEnvByObject(self.instance)
2172 fe7b0351 Michael Hanselmann
    nl = ([self.sstore.GetMasterNode(), self.instance.primary_node] +
2173 fe7b0351 Michael Hanselmann
          list(self.instance.secondary_nodes))
2174 fe7b0351 Michael Hanselmann
    return env, nl, nl
2175 fe7b0351 Michael Hanselmann
2176 fe7b0351 Michael Hanselmann
  def CheckPrereq(self):
2177 fe7b0351 Michael Hanselmann
    """Check prerequisites.
2178 fe7b0351 Michael Hanselmann

2179 fe7b0351 Michael Hanselmann
    This checks that the instance is in the cluster and is not running.
2180 fe7b0351 Michael Hanselmann

2181 fe7b0351 Michael Hanselmann
    """
2182 fe7b0351 Michael Hanselmann
    instance = self.cfg.GetInstanceInfo(
2183 fe7b0351 Michael Hanselmann
      self.cfg.ExpandInstanceName(self.op.instance_name))
2184 fe7b0351 Michael Hanselmann
    if instance is None:
2185 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("Instance '%s' not known" %
2186 3ecf6786 Iustin Pop
                                 self.op.instance_name)
2187 fe7b0351 Michael Hanselmann
    if instance.disk_template == constants.DT_DISKLESS:
2188 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("Instance '%s' has no disks" %
2189 3ecf6786 Iustin Pop
                                 self.op.instance_name)
2190 fe7b0351 Michael Hanselmann
    if instance.status != "down":
2191 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("Instance '%s' is marked to be up" %
2192 3ecf6786 Iustin Pop
                                 self.op.instance_name)
2193 fe7b0351 Michael Hanselmann
    remote_info = rpc.call_instance_info(instance.primary_node, instance.name)
2194 fe7b0351 Michael Hanselmann
    if remote_info:
2195 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("Instance '%s' is running on the node %s" %
2196 3ecf6786 Iustin Pop
                                 (self.op.instance_name,
2197 3ecf6786 Iustin Pop
                                  instance.primary_node))
2198 d0834de3 Michael Hanselmann
2199 d0834de3 Michael Hanselmann
    self.op.os_type = getattr(self.op, "os_type", None)
2200 d0834de3 Michael Hanselmann
    if self.op.os_type is not None:
2201 d0834de3 Michael Hanselmann
      # OS verification
2202 d0834de3 Michael Hanselmann
      pnode = self.cfg.GetNodeInfo(
2203 d0834de3 Michael Hanselmann
        self.cfg.ExpandNodeName(instance.primary_node))
2204 d0834de3 Michael Hanselmann
      if pnode is None:
2205 3ecf6786 Iustin Pop
        raise errors.OpPrereqError("Primary node '%s' is unknown" %
2206 3ecf6786 Iustin Pop
                                   instance.primary_node)
2207 00fe9e38 Guido Trotter
      os_obj = rpc.call_os_get(pnode.name, self.op.os_type)
2208 dfa96ded Guido Trotter
      if not os_obj:
2209 3ecf6786 Iustin Pop
        raise errors.OpPrereqError("OS '%s' not in supported OS list for"
2210 3ecf6786 Iustin Pop
                                   " primary node"  % self.op.os_type)
2211 d0834de3 Michael Hanselmann
2212 fe7b0351 Michael Hanselmann
    self.instance = instance
2213 fe7b0351 Michael Hanselmann
2214 fe7b0351 Michael Hanselmann
  def Exec(self, feedback_fn):
2215 fe7b0351 Michael Hanselmann
    """Reinstall the instance.
2216 fe7b0351 Michael Hanselmann

2217 fe7b0351 Michael Hanselmann
    """
2218 fe7b0351 Michael Hanselmann
    inst = self.instance
2219 fe7b0351 Michael Hanselmann
2220 d0834de3 Michael Hanselmann
    if self.op.os_type is not None:
2221 d0834de3 Michael Hanselmann
      feedback_fn("Changing OS to '%s'..." % self.op.os_type)
2222 d0834de3 Michael Hanselmann
      inst.os = self.op.os_type
2223 d0834de3 Michael Hanselmann
      self.cfg.AddInstance(inst)
2224 d0834de3 Michael Hanselmann
2225 fe7b0351 Michael Hanselmann
    _StartInstanceDisks(self.cfg, inst, None)
2226 fe7b0351 Michael Hanselmann
    try:
2227 fe7b0351 Michael Hanselmann
      feedback_fn("Running the instance OS create scripts...")
2228 fe7b0351 Michael Hanselmann
      if not rpc.call_instance_os_add(inst.primary_node, inst, "sda", "sdb"):
2229 f4bc1f2c Michael Hanselmann
        raise errors.OpExecError("Could not install OS for instance %s"
2230 f4bc1f2c Michael Hanselmann
                                 " on node %s" %
2231 3ecf6786 Iustin Pop
                                 (inst.name, inst.primary_node))
2232 fe7b0351 Michael Hanselmann
    finally:
2233 fe7b0351 Michael Hanselmann
      _ShutdownInstanceDisks(inst, self.cfg)
2234 fe7b0351 Michael Hanselmann
2235 fe7b0351 Michael Hanselmann
2236 decd5f45 Iustin Pop
class LURenameInstance(LogicalUnit):
2237 decd5f45 Iustin Pop
  """Rename an instance.
2238 decd5f45 Iustin Pop

2239 decd5f45 Iustin Pop
  """
2240 decd5f45 Iustin Pop
  HPATH = "instance-rename"
2241 decd5f45 Iustin Pop
  HTYPE = constants.HTYPE_INSTANCE
2242 decd5f45 Iustin Pop
  _OP_REQP = ["instance_name", "new_name"]
2243 decd5f45 Iustin Pop
2244 decd5f45 Iustin Pop
  def BuildHooksEnv(self):
2245 decd5f45 Iustin Pop
    """Build hooks env.
2246 decd5f45 Iustin Pop

2247 decd5f45 Iustin Pop
    This runs on master, primary and secondary nodes of the instance.
2248 decd5f45 Iustin Pop

2249 decd5f45 Iustin Pop
    """
2250 decd5f45 Iustin Pop
    env = _BuildInstanceHookEnvByObject(self.instance)
2251 decd5f45 Iustin Pop
    env["INSTANCE_NEW_NAME"] = self.op.new_name
2252 decd5f45 Iustin Pop
    nl = ([self.sstore.GetMasterNode(), self.instance.primary_node] +
2253 decd5f45 Iustin Pop
          list(self.instance.secondary_nodes))
2254 decd5f45 Iustin Pop
    return env, nl, nl
2255 decd5f45 Iustin Pop
2256 decd5f45 Iustin Pop
  def CheckPrereq(self):
2257 decd5f45 Iustin Pop
    """Check prerequisites.
2258 decd5f45 Iustin Pop

2259 decd5f45 Iustin Pop
    This checks that the instance is in the cluster and is not running.
2260 decd5f45 Iustin Pop

2261 decd5f45 Iustin Pop
    """
2262 decd5f45 Iustin Pop
    instance = self.cfg.GetInstanceInfo(
2263 decd5f45 Iustin Pop
      self.cfg.ExpandInstanceName(self.op.instance_name))
2264 decd5f45 Iustin Pop
    if instance is None:
2265 decd5f45 Iustin Pop
      raise errors.OpPrereqError("Instance '%s' not known" %
2266 decd5f45 Iustin Pop
                                 self.op.instance_name)
2267 decd5f45 Iustin Pop
    if instance.status != "down":
2268 decd5f45 Iustin Pop
      raise errors.OpPrereqError("Instance '%s' is marked to be up" %
2269 decd5f45 Iustin Pop
                                 self.op.instance_name)
2270 decd5f45 Iustin Pop
    remote_info = rpc.call_instance_info(instance.primary_node, instance.name)
2271 decd5f45 Iustin Pop
    if remote_info:
2272 decd5f45 Iustin Pop
      raise errors.OpPrereqError("Instance '%s' is running on the node %s" %
2273 decd5f45 Iustin Pop
                                 (self.op.instance_name,
2274 decd5f45 Iustin Pop
                                  instance.primary_node))
2275 decd5f45 Iustin Pop
    self.instance = instance
2276 decd5f45 Iustin Pop
2277 decd5f45 Iustin Pop
    # new name verification
2278 89e1fc26 Iustin Pop
    name_info = utils.HostInfo(self.op.new_name)
2279 decd5f45 Iustin Pop
2280 89e1fc26 Iustin Pop
    self.op.new_name = new_name = name_info.name
2281 7bde3275 Guido Trotter
    instance_list = self.cfg.GetInstanceList()
2282 7bde3275 Guido Trotter
    if new_name in instance_list:
2283 7bde3275 Guido Trotter
      raise errors.OpPrereqError("Instance '%s' is already in the cluster" %
2284 c09f363f Manuel Franceschini
                                 new_name)
2285 7bde3275 Guido Trotter
2286 decd5f45 Iustin Pop
    if not getattr(self.op, "ignore_ip", False):
2287 937f983d Guido Trotter
      if utils.TcpPing(name_info.ip, constants.DEFAULT_NODED_PORT):
2288 decd5f45 Iustin Pop
        raise errors.OpPrereqError("IP %s of instance %s already in use" %
2289 89e1fc26 Iustin Pop
                                   (name_info.ip, new_name))
2290 decd5f45 Iustin Pop
2291 decd5f45 Iustin Pop
2292 decd5f45 Iustin Pop
  def Exec(self, feedback_fn):
2293 decd5f45 Iustin Pop
    """Reinstall the instance.
2294 decd5f45 Iustin Pop

2295 decd5f45 Iustin Pop
    """
2296 decd5f45 Iustin Pop
    inst = self.instance
2297 decd5f45 Iustin Pop
    old_name = inst.name
2298 decd5f45 Iustin Pop
2299 b23c4333 Manuel Franceschini
    if inst.disk_template == constants.DT_FILE:
2300 b23c4333 Manuel Franceschini
      old_file_storage_dir = os.path.dirname(inst.disks[0].logical_id[1])
2301 b23c4333 Manuel Franceschini
2302 decd5f45 Iustin Pop
    self.cfg.RenameInstance(inst.name, self.op.new_name)
2303 decd5f45 Iustin Pop
2304 decd5f45 Iustin Pop
    # re-read the instance from the configuration after rename
2305 decd5f45 Iustin Pop
    inst = self.cfg.GetInstanceInfo(self.op.new_name)
2306 decd5f45 Iustin Pop
2307 b23c4333 Manuel Franceschini
    if inst.disk_template == constants.DT_FILE:
2308 b23c4333 Manuel Franceschini
      new_file_storage_dir = os.path.dirname(inst.disks[0].logical_id[1])
2309 b23c4333 Manuel Franceschini
      result = rpc.call_file_storage_dir_rename(inst.primary_node,
2310 b23c4333 Manuel Franceschini
                                                old_file_storage_dir,
2311 b23c4333 Manuel Franceschini
                                                new_file_storage_dir)
2312 b23c4333 Manuel Franceschini
2313 b23c4333 Manuel Franceschini
      if not result:
2314 b23c4333 Manuel Franceschini
        raise errors.OpExecError("Could not connect to node '%s' to rename"
2315 b23c4333 Manuel Franceschini
                                 " directory '%s' to '%s' (but the instance"
2316 b23c4333 Manuel Franceschini
                                 " has been renamed in Ganeti)" % (
2317 b23c4333 Manuel Franceschini
                                 inst.primary_node, old_file_storage_dir,
2318 b23c4333 Manuel Franceschini
                                 new_file_storage_dir))
2319 b23c4333 Manuel Franceschini
2320 b23c4333 Manuel Franceschini
      if not result[0]:
2321 b23c4333 Manuel Franceschini
        raise errors.OpExecError("Could not rename directory '%s' to '%s'"
2322 b23c4333 Manuel Franceschini
                                 " (but the instance has been renamed in"
2323 b23c4333 Manuel Franceschini
                                 " Ganeti)" % (old_file_storage_dir,
2324 b23c4333 Manuel Franceschini
                                               new_file_storage_dir))
2325 b23c4333 Manuel Franceschini
2326 decd5f45 Iustin Pop
    _StartInstanceDisks(self.cfg, inst, None)
2327 decd5f45 Iustin Pop
    try:
2328 decd5f45 Iustin Pop
      if not rpc.call_instance_run_rename(inst.primary_node, inst, old_name,
2329 decd5f45 Iustin Pop
                                          "sda", "sdb"):
2330 f4bc1f2c Michael Hanselmann
        msg = ("Could run OS rename script for instance %s on node %s (but the"
2331 f4bc1f2c Michael Hanselmann
               " instance has been renamed in Ganeti)" %
2332 decd5f45 Iustin Pop
               (inst.name, inst.primary_node))
2333 decd5f45 Iustin Pop
        logger.Error(msg)
2334 decd5f45 Iustin Pop
    finally:
2335 decd5f45 Iustin Pop
      _ShutdownInstanceDisks(inst, self.cfg)
2336 decd5f45 Iustin Pop
2337 decd5f45 Iustin Pop
2338 a8083063 Iustin Pop
class LURemoveInstance(LogicalUnit):
2339 a8083063 Iustin Pop
  """Remove an instance.
2340 a8083063 Iustin Pop

2341 a8083063 Iustin Pop
  """
2342 a8083063 Iustin Pop
  HPATH = "instance-remove"
2343 a8083063 Iustin Pop
  HTYPE = constants.HTYPE_INSTANCE
2344 5c54b832 Iustin Pop
  _OP_REQP = ["instance_name", "ignore_failures"]
2345 a8083063 Iustin Pop
2346 a8083063 Iustin Pop
  def BuildHooksEnv(self):
2347 a8083063 Iustin Pop
    """Build hooks env.
2348 a8083063 Iustin Pop

2349 a8083063 Iustin Pop
    This runs on master, primary and secondary nodes of the instance.
2350 a8083063 Iustin Pop

2351 a8083063 Iustin Pop
    """
2352 396e1b78 Michael Hanselmann
    env = _BuildInstanceHookEnvByObject(self.instance)
2353 1d67656e Iustin Pop
    nl = [self.sstore.GetMasterNode()]
2354 a8083063 Iustin Pop
    return env, nl, nl
2355 a8083063 Iustin Pop
2356 a8083063 Iustin Pop
  def CheckPrereq(self):
2357 a8083063 Iustin Pop
    """Check prerequisites.
2358 a8083063 Iustin Pop

2359 a8083063 Iustin Pop
    This checks that the instance is in the cluster.
2360 a8083063 Iustin Pop

2361 a8083063 Iustin Pop
    """
2362 a8083063 Iustin Pop
    instance = self.cfg.GetInstanceInfo(
2363 a8083063 Iustin Pop
      self.cfg.ExpandInstanceName(self.op.instance_name))
2364 a8083063 Iustin Pop
    if instance is None:
2365 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("Instance '%s' not known" %
2366 3ecf6786 Iustin Pop
                                 self.op.instance_name)
2367 a8083063 Iustin Pop
    self.instance = instance
2368 a8083063 Iustin Pop
2369 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
2370 a8083063 Iustin Pop
    """Remove the instance.
2371 a8083063 Iustin Pop

2372 a8083063 Iustin Pop
    """
2373 a8083063 Iustin Pop
    instance = self.instance
2374 a8083063 Iustin Pop
    logger.Info("shutting down instance %s on node %s" %
2375 a8083063 Iustin Pop
                (instance.name, instance.primary_node))
2376 a8083063 Iustin Pop
2377 a8083063 Iustin Pop
    if not rpc.call_instance_shutdown(instance.primary_node, instance):
2378 1d67656e Iustin Pop
      if self.op.ignore_failures:
2379 1d67656e Iustin Pop
        feedback_fn("Warning: can't shutdown instance")
2380 1d67656e Iustin Pop
      else:
2381 1d67656e Iustin Pop
        raise errors.OpExecError("Could not shutdown instance %s on node %s" %
2382 1d67656e Iustin Pop
                                 (instance.name, instance.primary_node))
2383 a8083063 Iustin Pop
2384 a8083063 Iustin Pop
    logger.Info("removing block devices for instance %s" % instance.name)
2385 a8083063 Iustin Pop
2386 1d67656e Iustin Pop
    if not _RemoveDisks(instance, self.cfg):
2387 1d67656e Iustin Pop
      if self.op.ignore_failures:
2388 1d67656e Iustin Pop
        feedback_fn("Warning: can't remove instance's disks")
2389 1d67656e Iustin Pop
      else:
2390 1d67656e Iustin Pop
        raise errors.OpExecError("Can't remove instance's disks")
2391 a8083063 Iustin Pop
2392 a8083063 Iustin Pop
    logger.Info("removing instance %s out of cluster config" % instance.name)
2393 a8083063 Iustin Pop
2394 a8083063 Iustin Pop
    self.cfg.RemoveInstance(instance.name)
2395 a8083063 Iustin Pop
2396 a8083063 Iustin Pop
2397 a8083063 Iustin Pop
class LUQueryInstances(NoHooksLU):
2398 a8083063 Iustin Pop
  """Logical unit for querying instances.
2399 a8083063 Iustin Pop

2400 a8083063 Iustin Pop
  """
2401 069dcc86 Iustin Pop
  _OP_REQP = ["output_fields", "names"]
2402 a8083063 Iustin Pop
2403 a8083063 Iustin Pop
  def CheckPrereq(self):
2404 a8083063 Iustin Pop
    """Check prerequisites.
2405 a8083063 Iustin Pop

2406 a8083063 Iustin Pop
    This checks that the fields required are valid output fields.
2407 a8083063 Iustin Pop

2408 a8083063 Iustin Pop
    """
2409 d8052456 Iustin Pop
    self.dynamic_fields = frozenset(["oper_state", "oper_ram", "status"])
2410 dcb93971 Michael Hanselmann
    _CheckOutputFields(static=["name", "os", "pnode", "snodes",
2411 dcb93971 Michael Hanselmann
                               "admin_state", "admin_ram",
2412 644eeef9 Iustin Pop
                               "disk_template", "ip", "mac", "bridge",
2413 130a6a6f Iustin Pop
                               "sda_size", "sdb_size", "vcpus", "tags"],
2414 dcb93971 Michael Hanselmann
                       dynamic=self.dynamic_fields,
2415 dcb93971 Michael Hanselmann
                       selected=self.op.output_fields)
2416 a8083063 Iustin Pop
2417 069dcc86 Iustin Pop
    self.wanted = _GetWantedInstances(self, self.op.names)
2418 069dcc86 Iustin Pop
2419 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
2420 a8083063 Iustin Pop
    """Computes the list of nodes and their attributes.
2421 a8083063 Iustin Pop

2422 a8083063 Iustin Pop
    """
2423 069dcc86 Iustin Pop
    instance_names = self.wanted
2424 a8083063 Iustin Pop
    instance_list = [self.cfg.GetInstanceInfo(iname) for iname
2425 a8083063 Iustin Pop
                     in instance_names]
2426 a8083063 Iustin Pop
2427 a8083063 Iustin Pop
    # begin data gathering
2428 a8083063 Iustin Pop
2429 a8083063 Iustin Pop
    nodes = frozenset([inst.primary_node for inst in instance_list])
2430 a8083063 Iustin Pop
2431 a8083063 Iustin Pop
    bad_nodes = []
2432 a8083063 Iustin Pop
    if self.dynamic_fields.intersection(self.op.output_fields):
2433 a8083063 Iustin Pop
      live_data = {}
2434 a8083063 Iustin Pop
      node_data = rpc.call_all_instances_info(nodes)
2435 a8083063 Iustin Pop
      for name in nodes:
2436 a8083063 Iustin Pop
        result = node_data[name]
2437 a8083063 Iustin Pop
        if result:
2438 a8083063 Iustin Pop
          live_data.update(result)
2439 a8083063 Iustin Pop
        elif result == False:
2440 a8083063 Iustin Pop
          bad_nodes.append(name)
2441 a8083063 Iustin Pop
        # else no instance is alive
2442 a8083063 Iustin Pop
    else:
2443 a8083063 Iustin Pop
      live_data = dict([(name, {}) for name in instance_names])
2444 a8083063 Iustin Pop
2445 a8083063 Iustin Pop
    # end data gathering
2446 a8083063 Iustin Pop
2447 a8083063 Iustin Pop
    output = []
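    # The "status" field computed below combines the configured (admin)
    # state with the live data gathered above:
    #   running        - marked up and actually running
    #   ERROR_up       - marked down but running
    #   ERROR_down     - marked up but not running
    #   ADMIN_down     - marked down and not running
    #   ERROR_nodedown - the primary node could not be contacted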
2448 a8083063 Iustin Pop
    for instance in instance_list:
2449 a8083063 Iustin Pop
      iout = []
2450 a8083063 Iustin Pop
      for field in self.op.output_fields:
2451 a8083063 Iustin Pop
        if field == "name":
2452 a8083063 Iustin Pop
          val = instance.name
2453 a8083063 Iustin Pop
        elif field == "os":
2454 a8083063 Iustin Pop
          val = instance.os
2455 a8083063 Iustin Pop
        elif field == "pnode":
2456 a8083063 Iustin Pop
          val = instance.primary_node
2457 a8083063 Iustin Pop
        elif field == "snodes":
2458 8a23d2d3 Iustin Pop
          val = list(instance.secondary_nodes)
2459 a8083063 Iustin Pop
        elif field == "admin_state":
2460 8a23d2d3 Iustin Pop
          val = (instance.status != "down")
2461 a8083063 Iustin Pop
        elif field == "oper_state":
2462 a8083063 Iustin Pop
          if instance.primary_node in bad_nodes:
2463 8a23d2d3 Iustin Pop
            val = None
2464 a8083063 Iustin Pop
          else:
2465 8a23d2d3 Iustin Pop
            val = bool(live_data.get(instance.name))
2466 d8052456 Iustin Pop
        elif field == "status":
2467 d8052456 Iustin Pop
          if instance.primary_node in bad_nodes:
2468 d8052456 Iustin Pop
            val = "ERROR_nodedown"
2469 d8052456 Iustin Pop
          else:
2470 d8052456 Iustin Pop
            running = bool(live_data.get(instance.name))
2471 d8052456 Iustin Pop
            if running:
2472 d8052456 Iustin Pop
              if instance.status != "down":
2473 d8052456 Iustin Pop
                val = "running"
2474 d8052456 Iustin Pop
              else:
2475 d8052456 Iustin Pop
                val = "ERROR_up"
2476 d8052456 Iustin Pop
            else:
2477 d8052456 Iustin Pop
              if instance.status != "down":
2478 d8052456 Iustin Pop
                val = "ERROR_down"
2479 d8052456 Iustin Pop
              else:
2480 d8052456 Iustin Pop
                val = "ADMIN_down"
2481 a8083063 Iustin Pop
        elif field == "admin_ram":
2482 a8083063 Iustin Pop
          val = instance.memory
2483 a8083063 Iustin Pop
        elif field == "oper_ram":
2484 a8083063 Iustin Pop
          if instance.primary_node in bad_nodes:
2485 8a23d2d3 Iustin Pop
            val = None
2486 a8083063 Iustin Pop
          elif instance.name in live_data:
2487 a8083063 Iustin Pop
            val = live_data[instance.name].get("memory", "?")
2488 a8083063 Iustin Pop
          else:
2489 a8083063 Iustin Pop
            val = "-"
2490 a8083063 Iustin Pop
        elif field == "disk_template":
2491 a8083063 Iustin Pop
          val = instance.disk_template
2492 a8083063 Iustin Pop
        elif field == "ip":
2493 a8083063 Iustin Pop
          val = instance.nics[0].ip
2494 a8083063 Iustin Pop
        elif field == "bridge":
2495 a8083063 Iustin Pop
          val = instance.nics[0].bridge
2496 a8083063 Iustin Pop
        elif field == "mac":
2497 a8083063 Iustin Pop
          val = instance.nics[0].mac
2498 644eeef9 Iustin Pop
        elif field == "sda_size" or field == "sdb_size":
2499 644eeef9 Iustin Pop
          disk = instance.FindDisk(field[:3])
2500 644eeef9 Iustin Pop
          if disk is None:
2501 8a23d2d3 Iustin Pop
            val = None
2502 644eeef9 Iustin Pop
          else:
2503 644eeef9 Iustin Pop
            val = disk.size
2504 d6d415e8 Iustin Pop
        elif field == "vcpus":
2505 d6d415e8 Iustin Pop
          val = instance.vcpus
2506 130a6a6f Iustin Pop
        elif field == "tags":
2507 130a6a6f Iustin Pop
          val = list(instance.GetTags())
2508 a8083063 Iustin Pop
        else:
2509 3ecf6786 Iustin Pop
          raise errors.ParameterError(field)
2510 a8083063 Iustin Pop
        iout.append(val)
2511 a8083063 Iustin Pop
      output.append(iout)
2512 a8083063 Iustin Pop
2513 a8083063 Iustin Pop
    return output
2514 a8083063 Iustin Pop
2515 a8083063 Iustin Pop
2516 a8083063 Iustin Pop
class LUFailoverInstance(LogicalUnit):
2517 a8083063 Iustin Pop
  """Failover an instance.
2518 a8083063 Iustin Pop

2519 a8083063 Iustin Pop
  """
2520 a8083063 Iustin Pop
  HPATH = "instance-failover"
2521 a8083063 Iustin Pop
  HTYPE = constants.HTYPE_INSTANCE
2522 a8083063 Iustin Pop
  _OP_REQP = ["instance_name", "ignore_consistency"]
2523 a8083063 Iustin Pop
2524 a8083063 Iustin Pop
  def BuildHooksEnv(self):
2525 a8083063 Iustin Pop
    """Build hooks env.
2526 a8083063 Iustin Pop

2527 a8083063 Iustin Pop
    This runs on master, primary and secondary nodes of the instance.
2528 a8083063 Iustin Pop

2529 a8083063 Iustin Pop
    """
2530 a8083063 Iustin Pop
    env = {
2531 a8083063 Iustin Pop
      "IGNORE_CONSISTENCY": self.op.ignore_consistency,
2532 a8083063 Iustin Pop
      }
2533 396e1b78 Michael Hanselmann
    env.update(_BuildInstanceHookEnvByObject(self.instance))
2534 880478f8 Iustin Pop
    nl = [self.sstore.GetMasterNode()] + list(self.instance.secondary_nodes)
2535 a8083063 Iustin Pop
    return env, nl, nl
2536 a8083063 Iustin Pop
2537 a8083063 Iustin Pop
  def CheckPrereq(self):
2538 a8083063 Iustin Pop
    """Check prerequisites.
2539 a8083063 Iustin Pop

2540 a8083063 Iustin Pop
    This checks that the instance is in the cluster.
2541 a8083063 Iustin Pop

2542 a8083063 Iustin Pop
    """
2543 a8083063 Iustin Pop
    instance = self.cfg.GetInstanceInfo(
2544 a8083063 Iustin Pop
      self.cfg.ExpandInstanceName(self.op.instance_name))
2545 a8083063 Iustin Pop
    if instance is None:
2546 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("Instance '%s' not known" %
2547 3ecf6786 Iustin Pop
                                 self.op.instance_name)
2548 a8083063 Iustin Pop
2549 a1f445d3 Iustin Pop
    if instance.disk_template not in constants.DTS_NET_MIRROR:
2550 2a710df1 Michael Hanselmann
      raise errors.OpPrereqError("Instance's disk layout is not"
2551 a1f445d3 Iustin Pop
                                 " network mirrored, cannot failover.")
2552 2a710df1 Michael Hanselmann
2553 2a710df1 Michael Hanselmann
    secondary_nodes = instance.secondary_nodes
2554 2a710df1 Michael Hanselmann
    if not secondary_nodes:
2555 2a710df1 Michael Hanselmann
      raise errors.ProgrammerError("no secondary node but using "
2556 abdf0113 Iustin Pop
                                   "a mirrored disk template")
2557 2a710df1 Michael Hanselmann
2558 2a710df1 Michael Hanselmann
    target_node = secondary_nodes[0]
2559 d4f16fd9 Iustin Pop
    # check memory requirements on the secondary node
2560 d4f16fd9 Iustin Pop
    _CheckNodeFreeMemory(self.cfg, target_node, "failing over instance %s" %
2561 d4f16fd9 Iustin Pop
                         instance.name, instance.memory)
2562 3a7c308e Guido Trotter
2563 a8083063 Iustin Pop
    # check bridge existence
2564 a8083063 Iustin Pop
    brlist = [nic.bridge for nic in instance.nics]
2565 50ff9a7a Iustin Pop
    if not rpc.call_bridges_exist(target_node, brlist):
2566 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("One or more target bridges %s does not"
2567 3ecf6786 Iustin Pop
                                 " exist on destination node '%s'" %
2568 50ff9a7a Iustin Pop
                                 (brlist, target_node))
2569 a8083063 Iustin Pop
2570 a8083063 Iustin Pop
    self.instance = instance
2571 a8083063 Iustin Pop
2572 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
2573 a8083063 Iustin Pop
    """Failover an instance.
2574 a8083063 Iustin Pop

2575 a8083063 Iustin Pop
    The failover is done by shutting it down on its present node and
2576 a8083063 Iustin Pop
    starting it on the secondary.
2577 a8083063 Iustin Pop

2578 a8083063 Iustin Pop
    """
2579 a8083063 Iustin Pop
    instance = self.instance
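    # Outline of the steps below: check disk consistency on the target,
    # shut the instance down on the source node, deactivate its disks,
    # repoint primary_node in the configuration and, if the instance was
    # marked up, reactivate the disks and start it on the target node.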
2580 a8083063 Iustin Pop
2581 a8083063 Iustin Pop
    source_node = instance.primary_node
2582 a8083063 Iustin Pop
    target_node = instance.secondary_nodes[0]
2583 a8083063 Iustin Pop
2584 a8083063 Iustin Pop
    feedback_fn("* checking disk consistency between source and target")
2585 a8083063 Iustin Pop
    for dev in instance.disks:
2586 abdf0113 Iustin Pop
      # for drbd, these are drbd over lvm
2587 a8083063 Iustin Pop
      if not _CheckDiskConsistency(self.cfg, dev, target_node, False):
2588 a0aaa0d0 Guido Trotter
        if instance.status == "up" and not self.op.ignore_consistency:
2589 3ecf6786 Iustin Pop
          raise errors.OpExecError("Disk %s is degraded on target node,"
2590 3ecf6786 Iustin Pop
                                   " aborting failover." % dev.iv_name)
2591 a8083063 Iustin Pop
2592 a8083063 Iustin Pop
    feedback_fn("* shutting down instance on source node")
2593 a8083063 Iustin Pop
    logger.Info("Shutting down instance %s on node %s" %
2594 a8083063 Iustin Pop
                (instance.name, source_node))
2595 a8083063 Iustin Pop
2596 a8083063 Iustin Pop
    if not rpc.call_instance_shutdown(source_node, instance):
2597 24a40d57 Iustin Pop
      if self.op.ignore_consistency:
2598 24a40d57 Iustin Pop
        logger.Error("Could not shutdown instance %s on node %s. Proceeding"
2599 24a40d57 Iustin Pop
                     " anyway. Please make sure node %s is down"  %
2600 24a40d57 Iustin Pop
                     (instance.name, source_node, source_node))
2601 24a40d57 Iustin Pop
      else:
2602 24a40d57 Iustin Pop
        raise errors.OpExecError("Could not shutdown instance %s on node %s" %
2603 24a40d57 Iustin Pop
                                 (instance.name, source_node))
2604 a8083063 Iustin Pop
2605 a8083063 Iustin Pop
    feedback_fn("* deactivating the instance's disks on source node")
2606 a8083063 Iustin Pop
    if not _ShutdownInstanceDisks(instance, self.cfg, ignore_primary=True):
2607 3ecf6786 Iustin Pop
      raise errors.OpExecError("Can't shut down the instance's disks.")
2608 a8083063 Iustin Pop
2609 a8083063 Iustin Pop
    instance.primary_node = target_node
2610 a8083063 Iustin Pop
    # distribute new instance config to the other nodes
2611 b6102dab Guido Trotter
    self.cfg.Update(instance)
2612 a8083063 Iustin Pop
2613 12a0cfbe Guido Trotter
    # Only start the instance if it's marked as up
2614 12a0cfbe Guido Trotter
    if instance.status == "up":
2615 12a0cfbe Guido Trotter
      feedback_fn("* activating the instance's disks on target node")
2616 12a0cfbe Guido Trotter
      logger.Info("Starting instance %s on node %s" %
2617 12a0cfbe Guido Trotter
                  (instance.name, target_node))
2618 12a0cfbe Guido Trotter
2619 12a0cfbe Guido Trotter
      disks_ok, dummy = _AssembleInstanceDisks(instance, self.cfg,
2620 12a0cfbe Guido Trotter
                                               ignore_secondaries=True)
2621 12a0cfbe Guido Trotter
      if not disks_ok:
2622 12a0cfbe Guido Trotter
        _ShutdownInstanceDisks(instance, self.cfg)
2623 12a0cfbe Guido Trotter
        raise errors.OpExecError("Can't activate the instance's disks")
2624 a8083063 Iustin Pop
2625 12a0cfbe Guido Trotter
      feedback_fn("* starting the instance on the target node")
2626 12a0cfbe Guido Trotter
      if not rpc.call_instance_start(target_node, instance, None):
2627 12a0cfbe Guido Trotter
        _ShutdownInstanceDisks(instance, self.cfg)
2628 12a0cfbe Guido Trotter
        raise errors.OpExecError("Could not start instance %s on node %s." %
2629 12a0cfbe Guido Trotter
                                 (instance.name, target_node))
2630 a8083063 Iustin Pop
2631 a8083063 Iustin Pop
2632 3f78eef2 Iustin Pop
def _CreateBlockDevOnPrimary(cfg, node, instance, device, info):
2633 a8083063 Iustin Pop
  """Create a tree of block devices on the primary node.
2634 a8083063 Iustin Pop

2635 a8083063 Iustin Pop
  This always creates all devices.
2636 a8083063 Iustin Pop

2637 a8083063 Iustin Pop
  """
2638 a8083063 Iustin Pop
  if device.children:
2639 a8083063 Iustin Pop
    for child in device.children:
2640 3f78eef2 Iustin Pop
      if not _CreateBlockDevOnPrimary(cfg, node, instance, child, info):
2641 a8083063 Iustin Pop
        return False
2642 a8083063 Iustin Pop
2643 a8083063 Iustin Pop
  cfg.SetDiskID(device, node)
2644 3f78eef2 Iustin Pop
  new_id = rpc.call_blockdev_create(node, device, device.size,
2645 3f78eef2 Iustin Pop
                                    instance.name, True, info)
2646 a8083063 Iustin Pop
  if not new_id:
2647 a8083063 Iustin Pop
    return False
2648 a8083063 Iustin Pop
  if device.physical_id is None:
2649 a8083063 Iustin Pop
    device.physical_id = new_id
2650 a8083063 Iustin Pop
  return True
2651 a8083063 Iustin Pop
2652 a8083063 Iustin Pop
2653 3f78eef2 Iustin Pop
def _CreateBlockDevOnSecondary(cfg, node, instance, device, force, info):
2654 a8083063 Iustin Pop
  """Create a tree of block devices on a secondary node.
2655 a8083063 Iustin Pop

2656 a8083063 Iustin Pop
  If this device type has to be created on secondaries, create it and
2657 a8083063 Iustin Pop
  all its children.
2658 a8083063 Iustin Pop

2659 a8083063 Iustin Pop
  If not, just recurse to children keeping the same 'force' value.
2660 a8083063 Iustin Pop

2661 a8083063 Iustin Pop
  """
2662 a8083063 Iustin Pop
  if device.CreateOnSecondary():
2663 a8083063 Iustin Pop
    force = True
2664 a8083063 Iustin Pop
  if device.children:
2665 a8083063 Iustin Pop
    for child in device.children:
2666 3f78eef2 Iustin Pop
      if not _CreateBlockDevOnSecondary(cfg, node, instance,
2667 3f78eef2 Iustin Pop
                                        child, force, info):
2668 a8083063 Iustin Pop
        return False
2669 a8083063 Iustin Pop
2670 a8083063 Iustin Pop
  if not force:
2671 a8083063 Iustin Pop
    return True
2672 a8083063 Iustin Pop
  cfg.SetDiskID(device, node)
2673 3f78eef2 Iustin Pop
  new_id = rpc.call_blockdev_create(node, device, device.size,
2674 3f78eef2 Iustin Pop
                                    instance.name, False, info)
2675 a8083063 Iustin Pop
  if not new_id:
2676 a8083063 Iustin Pop
    return False
2677 a8083063 Iustin Pop
  if device.physical_id is None:
2678 a8083063 Iustin Pop
    device.physical_id = new_id
2679 a8083063 Iustin Pop
  return True
2680 a8083063 Iustin Pop
2681 a8083063 Iustin Pop
2682 923b1523 Iustin Pop
def _GenerateUniqueNames(cfg, exts):
2683 923b1523 Iustin Pop
  """Generate a suitable LV name.
2684 923b1523 Iustin Pop

2685 923b1523 Iustin Pop
  This will generate a logical volume name for the given instance.
2686 923b1523 Iustin Pop

2687 923b1523 Iustin Pop
  """
2688 923b1523 Iustin Pop
  results = []
2689 923b1523 Iustin Pop
  for val in exts:
2690 923b1523 Iustin Pop
    new_id = cfg.GenerateUniqueID()
2691 923b1523 Iustin Pop
    results.append("%s%s" % (new_id, val))
2692 923b1523 Iustin Pop
  return results
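# For example, _GenerateUniqueNames(cfg, [".sda", ".sdb"]) returns two names,
# each prefixed with its own freshly generated unique ID, roughly of the form
# "<unique-id>.sda" and "<unique-id>.sdb" (illustrative only; the actual IDs
# come from the cluster configuration).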
2693 923b1523 Iustin Pop
2694 923b1523 Iustin Pop
2695 a1f445d3 Iustin Pop
def _GenerateDRBD8Branch(cfg, primary, secondary, size, names, iv_name):
2696 a1f445d3 Iustin Pop
  """Generate a drbd8 device complete with its children.
2697 a1f445d3 Iustin Pop

2698 a1f445d3 Iustin Pop
  """
2699 a1f445d3 Iustin Pop
  port = cfg.AllocatePort()
2700 a1f445d3 Iustin Pop
  vgname = cfg.GetVGName()
2701 a1f445d3 Iustin Pop
  dev_data = objects.Disk(dev_type=constants.LD_LV, size=size,
2702 a1f445d3 Iustin Pop
                          logical_id=(vgname, names[0]))
2703 a1f445d3 Iustin Pop
  dev_meta = objects.Disk(dev_type=constants.LD_LV, size=128,
2704 a1f445d3 Iustin Pop
                          logical_id=(vgname, names[1]))
2705 a1f445d3 Iustin Pop
  drbd_dev = objects.Disk(dev_type=constants.LD_DRBD8, size=size,
2706 a1f445d3 Iustin Pop
                          logical_id = (primary, secondary, port),
2707 a1f445d3 Iustin Pop
                          children = [dev_data, dev_meta],
2708 a1f445d3 Iustin Pop
                          iv_name=iv_name)
2709 a1f445d3 Iustin Pop
  return drbd_dev
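# The resulting device tree for one branch is (sizes in MiB):
#
#   DRBD8 (primary, secondary, port), iv_name
#    +- LV names[0]  - data, `size`
#    +- LV names[1]  - metadata, 128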
2710 a1f445d3 Iustin Pop
2711 7c0d6283 Michael Hanselmann
2712 923b1523 Iustin Pop
def _GenerateDiskTemplate(cfg, template_name,
2713 a8083063 Iustin Pop
                          instance_name, primary_node,
2714 0f1a06e3 Manuel Franceschini
                          secondary_nodes, disk_sz, swap_sz,
2715 0f1a06e3 Manuel Franceschini
                          file_storage_dir, file_driver):
2716 a8083063 Iustin Pop
  """Generate the entire disk layout for a given template type.
2717 a8083063 Iustin Pop

2718 a8083063 Iustin Pop
  """
2719 a8083063 Iustin Pop
  #TODO: compute space requirements
2720 a8083063 Iustin Pop
2721 923b1523 Iustin Pop
  vgname = cfg.GetVGName()
2722 3517d9b9 Manuel Franceschini
  if template_name == constants.DT_DISKLESS:
2723 a8083063 Iustin Pop
    disks = []
2724 3517d9b9 Manuel Franceschini
  elif template_name == constants.DT_PLAIN:
2725 a8083063 Iustin Pop
    if len(secondary_nodes) != 0:
2726 a8083063 Iustin Pop
      raise errors.ProgrammerError("Wrong template configuration")
2727 923b1523 Iustin Pop
2728 923b1523 Iustin Pop
    names = _GenerateUniqueNames(cfg, [".sda", ".sdb"])
2729 fe96220b Iustin Pop
    sda_dev = objects.Disk(dev_type=constants.LD_LV, size=disk_sz,
2730 923b1523 Iustin Pop
                           logical_id=(vgname, names[0]),
2731 a8083063 Iustin Pop
                           iv_name = "sda")
2732 fe96220b Iustin Pop
    sdb_dev = objects.Disk(dev_type=constants.LD_LV, size=swap_sz,
2733 923b1523 Iustin Pop
                           logical_id=(vgname, names[1]),
2734 a8083063 Iustin Pop
                           iv_name = "sdb")
2735 a8083063 Iustin Pop
    disks = [sda_dev, sdb_dev]
2736 a1f445d3 Iustin Pop
  elif template_name == constants.DT_DRBD8:
2737 a1f445d3 Iustin Pop
    if len(secondary_nodes) != 1:
2738 a1f445d3 Iustin Pop
      raise errors.ProgrammerError("Wrong template configuration")
2739 a1f445d3 Iustin Pop
    remote_node = secondary_nodes[0]
2740 a1f445d3 Iustin Pop
    names = _GenerateUniqueNames(cfg, [".sda_data", ".sda_meta",
2741 a1f445d3 Iustin Pop
                                       ".sdb_data", ".sdb_meta"])
2742 a1f445d3 Iustin Pop
    drbd_sda_dev = _GenerateDRBD8Branch(cfg, primary_node, remote_node,
2743 a1f445d3 Iustin Pop
                                         disk_sz, names[0:2], "sda")
2744 a1f445d3 Iustin Pop
    drbd_sdb_dev = _GenerateDRBD8Branch(cfg, primary_node, remote_node,
2745 a1f445d3 Iustin Pop
                                         swap_sz, names[2:4], "sdb")
2746 a1f445d3 Iustin Pop
    disks = [drbd_sda_dev, drbd_sdb_dev]
2747 0f1a06e3 Manuel Franceschini
  elif template_name == constants.DT_FILE:
2748 0f1a06e3 Manuel Franceschini
    if len(secondary_nodes) != 0:
2749 0f1a06e3 Manuel Franceschini
      raise errors.ProgrammerError("Wrong template configuration")
2750 0f1a06e3 Manuel Franceschini
2751 0f1a06e3 Manuel Franceschini
    file_sda_dev = objects.Disk(dev_type=constants.LD_FILE, size=disk_sz,
2752 0f1a06e3 Manuel Franceschini
                                iv_name="sda", logical_id=(file_driver,
2753 0f1a06e3 Manuel Franceschini
                                "%s/sda" % file_storage_dir))
2754 0f1a06e3 Manuel Franceschini
    file_sdb_dev = objects.Disk(dev_type=constants.LD_FILE, size=swap_sz,
2755 0f1a06e3 Manuel Franceschini
                                iv_name="sdb", logical_id=(file_driver,
2756 0f1a06e3 Manuel Franceschini
                                "%s/sdb" % file_storage_dir))
2757 0f1a06e3 Manuel Franceschini
    disks = [file_sda_dev, file_sdb_dev]
2758 a8083063 Iustin Pop
  else:
2759 a8083063 Iustin Pop
    raise errors.ProgrammerError("Invalid disk template '%s'" % template_name)
2760 a8083063 Iustin Pop
  return disks
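# Summary of the layouts produced above: DT_DISKLESS creates no disks;
# DT_PLAIN creates two LVs (sda, sdb) on the primary node only; DT_DRBD8
# creates two DRBD8 branches mirrored to the single secondary node; DT_FILE
# creates two file-backed disks under file_storage_dir using file_driver.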
2761 a8083063 Iustin Pop
2762 a8083063 Iustin Pop
2763 a0c3fea1 Michael Hanselmann
def _GetInstanceInfoText(instance):
2764 3ecf6786 Iustin Pop
  """Compute that text that should be added to the disk's metadata.
2765 3ecf6786 Iustin Pop

2766 3ecf6786 Iustin Pop
  """
2767 a0c3fea1 Michael Hanselmann
  return "originstname+%s" % instance.name
2768 a0c3fea1 Michael Hanselmann
2769 a0c3fea1 Michael Hanselmann
2770 a8083063 Iustin Pop
def _CreateDisks(cfg, instance):
2771 a8083063 Iustin Pop
  """Create all disks for an instance.
2772 a8083063 Iustin Pop

2773 a8083063 Iustin Pop
  This abstracts away some work from AddInstance.
2774 a8083063 Iustin Pop

2775 a8083063 Iustin Pop
  Args:
2776 a8083063 Iustin Pop
    instance: the instance object
2777 a8083063 Iustin Pop

2778 a8083063 Iustin Pop
  Returns:
2779 a8083063 Iustin Pop
    True or False showing the success of the creation process
2780 a8083063 Iustin Pop

2781 a8083063 Iustin Pop
  """
2782 a0c3fea1 Michael Hanselmann
  info = _GetInstanceInfoText(instance)
2783 a0c3fea1 Michael Hanselmann
2784 0f1a06e3 Manuel Franceschini
  if instance.disk_template == constants.DT_FILE:
2785 0f1a06e3 Manuel Franceschini
    file_storage_dir = os.path.dirname(instance.disks[0].logical_id[1])
2786 0f1a06e3 Manuel Franceschini
    result = rpc.call_file_storage_dir_create(instance.primary_node,
2787 0f1a06e3 Manuel Franceschini
                                              file_storage_dir)
2788 0f1a06e3 Manuel Franceschini
2789 0f1a06e3 Manuel Franceschini
    if not result:
2790 b62ddbe5 Guido Trotter
      logger.Error("Could not connect to node '%s'" % instance.primary_node)
2791 0f1a06e3 Manuel Franceschini
      return False
2792 0f1a06e3 Manuel Franceschini
2793 0f1a06e3 Manuel Franceschini
    if not result[0]:
2794 0f1a06e3 Manuel Franceschini
      logger.Error("failed to create directory '%s'" % file_storage_dir)
2795 0f1a06e3 Manuel Franceschini
      return False
2796 0f1a06e3 Manuel Franceschini
2797 a8083063 Iustin Pop
  for device in instance.disks:
2798 a8083063 Iustin Pop
    logger.Info("creating volume %s for instance %s" %
2799 1c6e3627 Manuel Franceschini
                (device.iv_name, instance.name))
2800 a8083063 Iustin Pop
    #HARDCODE
2801 a8083063 Iustin Pop
    for secondary_node in instance.secondary_nodes:
2802 3f78eef2 Iustin Pop
      if not _CreateBlockDevOnSecondary(cfg, secondary_node, instance,
2803 3f78eef2 Iustin Pop
                                        device, False, info):
2804 a8083063 Iustin Pop
        logger.Error("failed to create volume %s (%s) on secondary node %s!" %
2805 a8083063 Iustin Pop
                     (device.iv_name, device, secondary_node))
2806 a8083063 Iustin Pop
        return False
2807 a8083063 Iustin Pop
    #HARDCODE
2808 3f78eef2 Iustin Pop
    if not _CreateBlockDevOnPrimary(cfg, instance.primary_node,
2809 3f78eef2 Iustin Pop
                                    instance, device, info):
2810 a8083063 Iustin Pop
      logger.Error("failed to create volume %s on primary!" %
2811 a8083063 Iustin Pop
                   device.iv_name)
2812 a8083063 Iustin Pop
      return False
2813 1c6e3627 Manuel Franceschini
2814 a8083063 Iustin Pop
  return True
2815 a8083063 Iustin Pop
2816 a8083063 Iustin Pop
2817 a8083063 Iustin Pop
def _RemoveDisks(instance, cfg):
2818 a8083063 Iustin Pop
  """Remove all disks for an instance.
2819 a8083063 Iustin Pop

2820 a8083063 Iustin Pop
  This abstracts away some work from `AddInstance()` and
2821 a8083063 Iustin Pop
  `RemoveInstance()`. Note that in case some of the devices couldn't
2822 1d67656e Iustin Pop
  be removed, the removal will continue with the other ones (compare
2823 a8083063 Iustin Pop
  with `_CreateDisks()`).
2824 a8083063 Iustin Pop

2825 a8083063 Iustin Pop
  Args:
2826 a8083063 Iustin Pop
    instance: the instance object
2827 a8083063 Iustin Pop

2828 a8083063 Iustin Pop
  Returns:
2829 a8083063 Iustin Pop
    True or False showing the success of the removal process
2830 a8083063 Iustin Pop

2831 a8083063 Iustin Pop
  """
2832 a8083063 Iustin Pop
  logger.Info("removing block devices for instance %s" % instance.name)
2833 a8083063 Iustin Pop
2834 a8083063 Iustin Pop
  result = True
2835 a8083063 Iustin Pop
  for device in instance.disks:
2836 a8083063 Iustin Pop
    for node, disk in device.ComputeNodeTree(instance.primary_node):
2837 a8083063 Iustin Pop
      cfg.SetDiskID(disk, node)
2838 a8083063 Iustin Pop
      if not rpc.call_blockdev_remove(node, disk):
2839 a8083063 Iustin Pop
        logger.Error("could not remove block device %s on node %s,"
2840 a8083063 Iustin Pop
                     " continuing anyway" %
2841 a8083063 Iustin Pop
                     (device.iv_name, node))
2842 a8083063 Iustin Pop
        result = False
2843 0f1a06e3 Manuel Franceschini
2844 0f1a06e3 Manuel Franceschini
  if instance.disk_template == constants.DT_FILE:
2845 0f1a06e3 Manuel Franceschini
    file_storage_dir = os.path.dirname(instance.disks[0].logical_id[1])
2846 0f1a06e3 Manuel Franceschini
    if not rpc.call_file_storage_dir_remove(instance.primary_node,
2847 0f1a06e3 Manuel Franceschini
                                            file_storage_dir):
2848 0f1a06e3 Manuel Franceschini
      logger.Error("could not remove directory '%s'" % file_storage_dir)
2849 0f1a06e3 Manuel Franceschini
      result = False
2850 0f1a06e3 Manuel Franceschini
2851 a8083063 Iustin Pop
  return result
2852 a8083063 Iustin Pop
2853 a8083063 Iustin Pop
2854 e2fe6369 Iustin Pop
def _ComputeDiskSize(disk_template, disk_size, swap_size):
2855 e2fe6369 Iustin Pop
  """Compute disk size requirements in the volume group
2856 e2fe6369 Iustin Pop

2857 e2fe6369 Iustin Pop
  This is currently hard-coded for the two-drive layout.
2858 e2fe6369 Iustin Pop

2859 e2fe6369 Iustin Pop
  """
2860 e2fe6369 Iustin Pop
  # Required free disk space as a function of disk and swap space
2861 e2fe6369 Iustin Pop
  req_size_dict = {
2862 e2fe6369 Iustin Pop
    constants.DT_DISKLESS: None,
2863 e2fe6369 Iustin Pop
    constants.DT_PLAIN: disk_size + swap_size,
2864 e2fe6369 Iustin Pop
    # 256 MB are added for drbd metadata, 128MB for each drbd device
2865 e2fe6369 Iustin Pop
    constants.DT_DRBD8: disk_size + swap_size + 256,
2866 e2fe6369 Iustin Pop
    constants.DT_FILE: None,
2867 e2fe6369 Iustin Pop
  }
2868 e2fe6369 Iustin Pop
2869 e2fe6369 Iustin Pop
  if disk_template not in req_size_dict:
2870 e2fe6369 Iustin Pop
    raise errors.ProgrammerError("Disk template '%s' size requirement"
2871 e2fe6369 Iustin Pop
                                 " is unknown" %  disk_template)
2872 e2fe6369 Iustin Pop
2873 e2fe6369 Iustin Pop
  return req_size_dict[disk_template]
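# A worked example: for a 10240 MiB disk and a 4096 MiB swap, DT_PLAIN needs
# 10240 + 4096 = 14336 MiB of free space in the volume group, while DT_DRBD8
# needs 10240 + 4096 + 256 = 14592 MiB (the extra 256 MiB covering the two
# 128 MiB DRBD metadata volumes); DT_DISKLESS and DT_FILE need no VG space.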
2874 e2fe6369 Iustin Pop
2875 e2fe6369 Iustin Pop
2876 a8083063 Iustin Pop
class LUCreateInstance(LogicalUnit):
2877 a8083063 Iustin Pop
  """Create an instance.
2878 a8083063 Iustin Pop

2879 a8083063 Iustin Pop
  """
2880 a8083063 Iustin Pop
  HPATH = "instance-add"
2881 a8083063 Iustin Pop
  HTYPE = constants.HTYPE_INSTANCE
2882 538475ca Iustin Pop
  _OP_REQP = ["instance_name", "mem_size", "disk_size",
2883 a8083063 Iustin Pop
              "disk_template", "swap_size", "mode", "start", "vcpus",
2884 1862d460 Alexander Schreiber
              "wait_for_sync", "ip_check", "mac"]
2885 a8083063 Iustin Pop
2886 538475ca Iustin Pop
  def _RunAllocator(self):
2887 538475ca Iustin Pop
    """Run the allocator based on input opcode.
2888 538475ca Iustin Pop

2889 538475ca Iustin Pop
    """
2890 538475ca Iustin Pop
    disks = [{"size": self.op.disk_size, "mode": "w"},
2891 538475ca Iustin Pop
             {"size": self.op.swap_size, "mode": "w"}]
2892 538475ca Iustin Pop
    nics = [{"mac": self.op.mac, "ip": getattr(self.op, "ip", None),
2893 538475ca Iustin Pop
             "bridge": self.op.bridge}]
2894 d1c2dd75 Iustin Pop
    ial = IAllocator(self.cfg, self.sstore,
2895 29859cb7 Iustin Pop
                     mode=constants.IALLOCATOR_MODE_ALLOC,
2896 d1c2dd75 Iustin Pop
                     name=self.op.instance_name,
2897 d1c2dd75 Iustin Pop
                     disk_template=self.op.disk_template,
2898 d1c2dd75 Iustin Pop
                     tags=[],
2899 d1c2dd75 Iustin Pop
                     os=self.op.os_type,
2900 d1c2dd75 Iustin Pop
                     vcpus=self.op.vcpus,
2901 d1c2dd75 Iustin Pop
                     mem_size=self.op.mem_size,
2902 d1c2dd75 Iustin Pop
                     disks=disks,
2903 d1c2dd75 Iustin Pop
                     nics=nics,
2904 29859cb7 Iustin Pop
                     )
2905 d1c2dd75 Iustin Pop
2906 d1c2dd75 Iustin Pop
    ial.Run(self.op.iallocator)
2907 d1c2dd75 Iustin Pop
2908 d1c2dd75 Iustin Pop
    if not ial.success:
2909 538475ca Iustin Pop
      raise errors.OpPrereqError("Can't compute nodes using"
2910 538475ca Iustin Pop
                                 " iallocator '%s': %s" % (self.op.iallocator,
2911 d1c2dd75 Iustin Pop
                                                           ial.info))
2912 27579978 Iustin Pop
    if len(ial.nodes) != ial.required_nodes:
2913 538475ca Iustin Pop
      raise errors.OpPrereqError("iallocator '%s' returned invalid number"
2914 538475ca Iustin Pop
                                 " of nodes (%s), required %s" %
2915 27579978 Iustin Pop
                                 (self.op.iallocator, len(ial.nodes),
                                  ial.required_nodes))
2916 d1c2dd75 Iustin Pop
    self.op.pnode = ial.nodes[0]
2917 538475ca Iustin Pop
    logger.ToStdout("Selected nodes for the instance: %s" %
2918 d1c2dd75 Iustin Pop
                    (", ".join(ial.nodes),))
2919 538475ca Iustin Pop
    logger.Info("Selected nodes for instance %s via iallocator %s: %s" %
2920 d1c2dd75 Iustin Pop
                (self.op.instance_name, self.op.iallocator, ial.nodes))
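    # when the chosen disk template needs a secondary node (e.g. drbd8) the
    # allocator is expected to have returned two nodes; the second one is
    # used as the secondary below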
2921 27579978 Iustin Pop
    if ial.required_nodes == 2:
2922 d1c2dd75 Iustin Pop
      self.op.snode = ial.nodes[1]
2923 538475ca Iustin Pop
2924 a8083063 Iustin Pop
  def BuildHooksEnv(self):
2925 a8083063 Iustin Pop
    """Build hooks env.
2926 a8083063 Iustin Pop

2927 a8083063 Iustin Pop
    This runs on master, primary and secondary nodes of the instance.
2928 a8083063 Iustin Pop

2929 a8083063 Iustin Pop
    """
2930 a8083063 Iustin Pop
    env = {
2931 396e1b78 Michael Hanselmann
      "INSTANCE_DISK_TEMPLATE": self.op.disk_template,
2932 396e1b78 Michael Hanselmann
      "INSTANCE_DISK_SIZE": self.op.disk_size,
2933 396e1b78 Michael Hanselmann
      "INSTANCE_SWAP_SIZE": self.op.swap_size,
2934 a8083063 Iustin Pop
      "INSTANCE_ADD_MODE": self.op.mode,
2935 a8083063 Iustin Pop
      }
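    # illustrative values only, e.g. INSTANCE_DISK_TEMPLATE="plain",
    # INSTANCE_DISK_SIZE=10240, INSTANCE_SWAP_SIZE=4096,
    # INSTANCE_ADD_MODE="create"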
2936 a8083063 Iustin Pop
    if self.op.mode == constants.INSTANCE_IMPORT:
2937 396e1b78 Michael Hanselmann
      env["INSTANCE_SRC_NODE"] = self.op.src_node
2938 396e1b78 Michael Hanselmann
      env["INSTANCE_SRC_PATH"] = self.op.src_path
2939 396e1b78 Michael Hanselmann
      env["INSTANCE_SRC_IMAGE"] = self.src_image
2940 396e1b78 Michael Hanselmann
2941 396e1b78 Michael Hanselmann
    env.update(_BuildInstanceHookEnv(name=self.op.instance_name,
2942 396e1b78 Michael Hanselmann
      primary_node=self.op.pnode,
2943 396e1b78 Michael Hanselmann
      secondary_nodes=self.secondaries,
2944 396e1b78 Michael Hanselmann
      status=self.instance_status,
2945 ecb215b5 Michael Hanselmann
      os_type=self.op.os_type,
2946 396e1b78 Michael Hanselmann
      memory=self.op.mem_size,
2947 396e1b78 Michael Hanselmann
      vcpus=self.op.vcpus,
2948 c7b27e9e Iustin Pop
      nics=[(self.inst_ip, self.op.bridge, self.op.mac)],
2949 396e1b78 Michael Hanselmann
    ))
2950 a8083063 Iustin Pop
2951 880478f8 Iustin Pop
    nl = ([self.sstore.GetMasterNode(), self.op.pnode] +
2952 a8083063 Iustin Pop
          self.secondaries)
2953 a8083063 Iustin Pop
    return env, nl, nl
2954 a8083063 Iustin Pop
2955 a8083063 Iustin Pop
2956 a8083063 Iustin Pop
  def CheckPrereq(self):
2957 a8083063 Iustin Pop
    """Check prerequisites.
2958 a8083063 Iustin Pop

2959 a8083063 Iustin Pop
    """
2960 538475ca Iustin Pop
    # set optional parameters to none if they don't exist
2961 538475ca Iustin Pop
    for attr in ["kernel_path", "initrd_path", "hvm_boot_order", "pnode",
2962 31a853d2 Iustin Pop
                 "iallocator", "hvm_acpi", "hvm_pae", "hvm_cdrom_image_path",
2963 31a853d2 Iustin Pop
                 "vnc_bind_address"]:
2964 40ed12dd Guido Trotter
      if not hasattr(self.op, attr):
2965 40ed12dd Guido Trotter
        setattr(self.op, attr, None)
2966 40ed12dd Guido Trotter
2967 a8083063 Iustin Pop
    if self.op.mode not in (constants.INSTANCE_CREATE,
2968 a8083063 Iustin Pop
                            constants.INSTANCE_IMPORT):
2969 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("Invalid instance creation mode '%s'" %
2970 3ecf6786 Iustin Pop
                                 self.op.mode)
2971 a8083063 Iustin Pop
2972 eedc99de Manuel Franceschini
    if (not self.cfg.GetVGName() and
2973 eedc99de Manuel Franceschini
        self.op.disk_template not in constants.DTS_NOT_LVM):
2974 eedc99de Manuel Franceschini
      raise errors.OpPrereqError("Cluster does not support lvm-based"
2975 eedc99de Manuel Franceschini
                                 " instances")
2976 eedc99de Manuel Franceschini
2977 a8083063 Iustin Pop
    if self.op.mode == constants.INSTANCE_IMPORT:
2978 a8083063 Iustin Pop
      src_node = getattr(self.op, "src_node", None)
2979 a8083063 Iustin Pop
      src_path = getattr(self.op, "src_path", None)
2980 a8083063 Iustin Pop
      if src_node is None or src_path is None:
2981 3ecf6786 Iustin Pop
        raise errors.OpPrereqError("Importing an instance requires source"
2982 3ecf6786 Iustin Pop
                                   " node and path options")
2983 a8083063 Iustin Pop
      src_node_full = self.cfg.ExpandNodeName(src_node)
2984 a8083063 Iustin Pop
      if src_node_full is None:
2985 3ecf6786 Iustin Pop
        raise errors.OpPrereqError("Unknown source node '%s'" % src_node)
2986 a8083063 Iustin Pop
      self.op.src_node = src_node = src_node_full
2987 a8083063 Iustin Pop
2988 a8083063 Iustin Pop
      if not os.path.isabs(src_path):
2989 3ecf6786 Iustin Pop
        raise errors.OpPrereqError("The source path must be absolute")
2990 a8083063 Iustin Pop
2991 a8083063 Iustin Pop
      export_info = rpc.call_export_info(src_node, src_path)
2992 a8083063 Iustin Pop
2993 a8083063 Iustin Pop
      if not export_info:
2994 3ecf6786 Iustin Pop
        raise errors.OpPrereqError("No export found in dir %s" % src_path)
2995 a8083063 Iustin Pop
2996 a8083063 Iustin Pop
      if not export_info.has_section(constants.INISECT_EXP):
2997 3ecf6786 Iustin Pop
        raise errors.ProgrammerError("Corrupted export config")
2998 a8083063 Iustin Pop
2999 a8083063 Iustin Pop
      ei_version = export_info.get(constants.INISECT_EXP, 'version')
3000 a8083063 Iustin Pop
      if (int(ei_version) != constants.EXPORT_VERSION):
3001 3ecf6786 Iustin Pop
        raise errors.OpPrereqError("Wrong export version %s (wanted %d)" %
3002 3ecf6786 Iustin Pop
                                   (ei_version, constants.EXPORT_VERSION))
3003 a8083063 Iustin Pop
3004 a8083063 Iustin Pop
      if int(export_info.get(constants.INISECT_INS, 'disk_count')) > 1:
3005 3ecf6786 Iustin Pop
        raise errors.OpPrereqError("Can't import instance with more than"
3006 3ecf6786 Iustin Pop
                                   " one data disk")
3007 a8083063 Iustin Pop
3008 a8083063 Iustin Pop
      # FIXME: are the old os-es, disk sizes, etc. useful?
3009 a8083063 Iustin Pop
      self.op.os_type = export_info.get(constants.INISECT_EXP, 'os')
3010 a8083063 Iustin Pop
      diskimage = os.path.join(src_path, export_info.get(constants.INISECT_INS,
3011 a8083063 Iustin Pop
                                                         'disk0_dump'))
3012 a8083063 Iustin Pop
      self.src_image = diskimage
3013 a8083063 Iustin Pop
    else: # INSTANCE_CREATE
3014 a8083063 Iustin Pop
      if getattr(self.op, "os_type", None) is None:
3015 3ecf6786 Iustin Pop
        raise errors.OpPrereqError("No guest OS specified")
3016 a8083063 Iustin Pop
3017 901a65c1 Iustin Pop
    #### instance parameters check
3018 901a65c1 Iustin Pop
3019 a8083063 Iustin Pop
    # disk template and mirror node verification
3020 a8083063 Iustin Pop
    if self.op.disk_template not in constants.DISK_TEMPLATES:
3021 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("Invalid disk template name")
3022 a8083063 Iustin Pop
3023 901a65c1 Iustin Pop
    # instance name verification
3024 901a65c1 Iustin Pop
    hostname1 = utils.HostInfo(self.op.instance_name)
3025 901a65c1 Iustin Pop
3026 901a65c1 Iustin Pop
    self.op.instance_name = instance_name = hostname1.name
3027 901a65c1 Iustin Pop
    instance_list = self.cfg.GetInstanceList()
3028 901a65c1 Iustin Pop
    if instance_name in instance_list:
3029 901a65c1 Iustin Pop
      raise errors.OpPrereqError("Instance '%s' is already in the cluster" %
3030 901a65c1 Iustin Pop
                                 instance_name)
3031 901a65c1 Iustin Pop
3032 901a65c1 Iustin Pop
    # ip validity checks
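    # accepted values are None or "none" (no IP), "auto" (use the resolved
    # address of the instance name) or an explicit IP address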
3033 901a65c1 Iustin Pop
    ip = getattr(self.op, "ip", None)
3034 901a65c1 Iustin Pop
    if ip is None or ip.lower() == "none":
3035 901a65c1 Iustin Pop
      inst_ip = None
3036 901a65c1 Iustin Pop
    elif ip.lower() == "auto":
3037 901a65c1 Iustin Pop
      inst_ip = hostname1.ip
3038 901a65c1 Iustin Pop
    else:
3039 901a65c1 Iustin Pop
      if not utils.IsValidIP(ip):
3040 901a65c1 Iustin Pop
        raise errors.OpPrereqError("given IP address '%s' doesn't look"
3041 901a65c1 Iustin Pop
                                   " like a valid IP" % ip)
3042 901a65c1 Iustin Pop
      inst_ip = ip
3043 901a65c1 Iustin Pop
    self.inst_ip = self.op.ip = inst_ip
3044 901a65c1 Iustin Pop
3045 901a65c1 Iustin Pop
    if self.op.start and not self.op.ip_check:
3046 901a65c1 Iustin Pop
      raise errors.OpPrereqError("Cannot ignore IP address conflicts when"
3047 901a65c1 Iustin Pop
                                 " adding an instance in start mode")
3048 901a65c1 Iustin Pop
3049 901a65c1 Iustin Pop
    if self.op.ip_check:
3050 901a65c1 Iustin Pop
      if utils.TcpPing(hostname1.ip, constants.DEFAULT_NODED_PORT):
3051 901a65c1 Iustin Pop
        raise errors.OpPrereqError("IP %s of instance %s already in use" %
3052 901a65c1 Iustin Pop
                                   (hostname1.ip, instance_name))
3053 901a65c1 Iustin Pop
3054 901a65c1 Iustin Pop
    # MAC address verification
3055 901a65c1 Iustin Pop
    if self.op.mac != "auto":
3056 901a65c1 Iustin Pop
      if not utils.IsValidMac(self.op.mac.lower()):
3057 901a65c1 Iustin Pop
        raise errors.OpPrereqError("invalid MAC address specified: %s" %
3058 901a65c1 Iustin Pop
                                   self.op.mac)
3059 901a65c1 Iustin Pop
3060 901a65c1 Iustin Pop
    # bridge verification
3061 901a65c1 Iustin Pop
    bridge = getattr(self.op, "bridge", None)
3062 901a65c1 Iustin Pop
    if bridge is None:
3063 901a65c1 Iustin Pop
      self.op.bridge = self.cfg.GetDefBridge()
3064 901a65c1 Iustin Pop
    else:
3065 901a65c1 Iustin Pop
      self.op.bridge = bridge
3066 901a65c1 Iustin Pop
3067 901a65c1 Iustin Pop
    # boot order verification
3068 901a65c1 Iustin Pop
    if self.op.hvm_boot_order is not None:
3069 901a65c1 Iustin Pop
      if len(self.op.hvm_boot_order.strip("acdn")) != 0:
3070 901a65c1 Iustin Pop
        raise errors.OpPrereqError("invalid boot order specified,"
3071 901a65c1 Iustin Pop
                                   " must be one or more of [acdn]")
3072 901a65c1 Iustin Pop
    # file storage checks
3073 0f1a06e3 Manuel Franceschini
    if (self.op.file_driver and
3074 0f1a06e3 Manuel Franceschini
        not self.op.file_driver in constants.FILE_DRIVER):
3075 0f1a06e3 Manuel Franceschini
      raise errors.OpPrereqError("Invalid file driver name '%s'" %
3076 0f1a06e3 Manuel Franceschini
                                 self.op.file_driver)
3077 0f1a06e3 Manuel Franceschini
3078 0f1a06e3 Manuel Franceschini
    if self.op.file_storage_dir and os.path.isabs(self.op.file_storage_dir):
3079 b4de68a9 Iustin Pop
      raise errors.OpPrereqError("File storage directory not a relative"
3080 b4de68a9 Iustin Pop
                                 " path")
3081 538475ca Iustin Pop
    #### allocator run
3082 538475ca Iustin Pop
3083 538475ca Iustin Pop
    if [self.op.iallocator, self.op.pnode].count(None) != 1:
3084 538475ca Iustin Pop
      raise errors.OpPrereqError("One and only one of iallocator and primary"
3085 538475ca Iustin Pop
                                 " node must be given")
3086 538475ca Iustin Pop
3087 538475ca Iustin Pop
    if self.op.iallocator is not None:
3088 538475ca Iustin Pop
      self._RunAllocator()
3089 0f1a06e3 Manuel Franceschini
3090 901a65c1 Iustin Pop
    #### node related checks
3091 901a65c1 Iustin Pop
3092 901a65c1 Iustin Pop
    # check primary node
3093 901a65c1 Iustin Pop
    pnode = self.cfg.GetNodeInfo(self.cfg.ExpandNodeName(self.op.pnode))
3094 901a65c1 Iustin Pop
    if pnode is None:
3095 901a65c1 Iustin Pop
      raise errors.OpPrereqError("Primary node '%s' is unknown" %
3096 901a65c1 Iustin Pop
                                 self.op.pnode)
3097 901a65c1 Iustin Pop
    self.op.pnode = pnode.name
3098 901a65c1 Iustin Pop
    self.pnode = pnode
3099 901a65c1 Iustin Pop
    self.secondaries = []
3100 901a65c1 Iustin Pop
3101 901a65c1 Iustin Pop
    # mirror node verification
3102 a1f445d3 Iustin Pop
    if self.op.disk_template in constants.DTS_NET_MIRROR:
3103 a8083063 Iustin Pop
      if getattr(self.op, "snode", None) is None:
3104 a1f445d3 Iustin Pop
        raise errors.OpPrereqError("The networked disk templates need"
3105 3ecf6786 Iustin Pop
                                   " a mirror node")
3106 a8083063 Iustin Pop
3107 a8083063 Iustin Pop
      snode_name = self.cfg.ExpandNodeName(self.op.snode)
3108 a8083063 Iustin Pop
      if snode_name is None:
3109 3ecf6786 Iustin Pop
        raise errors.OpPrereqError("Unknown secondary node '%s'" %
3110 3ecf6786 Iustin Pop
                                   self.op.snode)
3111 a8083063 Iustin Pop
      elif snode_name == pnode.name:
3112 3ecf6786 Iustin Pop
        raise errors.OpPrereqError("The secondary node cannot be"
3113 3ecf6786 Iustin Pop
                                   " the primary node.")
3114 a8083063 Iustin Pop
      self.secondaries.append(snode_name)
3115 a8083063 Iustin Pop
3116 e2fe6369 Iustin Pop
    req_size = _ComputeDiskSize(self.op.disk_template,
3117 e2fe6369 Iustin Pop
                                self.op.disk_size, self.op.swap_size)
3118 ed1ebc60 Guido Trotter
3119 8d75db10 Iustin Pop
    # Check lv size requirements
3120 8d75db10 Iustin Pop
    if req_size is not None:
3121 8d75db10 Iustin Pop
      nodenames = [pnode.name] + self.secondaries
3122 8d75db10 Iustin Pop
      nodeinfo = rpc.call_node_info(nodenames, self.cfg.GetVGName())
3123 8d75db10 Iustin Pop
      for node in nodenames:
3124 8d75db10 Iustin Pop
        info = nodeinfo.get(node, None)
3125 8d75db10 Iustin Pop
        if not info:
3126 8d75db10 Iustin Pop
          raise errors.OpPrereqError("Cannot get current information"
3127 3e91897b Iustin Pop
                                     " from node '%s'" % node)
3128 8d75db10 Iustin Pop
        vg_free = info.get('vg_free', None)
3129 8d75db10 Iustin Pop
        if not isinstance(vg_free, int):
3130 8d75db10 Iustin Pop
          raise errors.OpPrereqError("Can't compute free disk space on"
3131 8d75db10 Iustin Pop
                                     " node %s" % node)
3132 8d75db10 Iustin Pop
        if req_size > info['vg_free']:
3133 8d75db10 Iustin Pop
          raise errors.OpPrereqError("Not enough disk space on target node %s."
3134 8d75db10 Iustin Pop
                                     " %d MB available, %d MB required" %
3135 8d75db10 Iustin Pop
                                     (node, info['vg_free'], req_size))
3136 ed1ebc60 Guido Trotter
3137 a8083063 Iustin Pop
    # os verification
3138 00fe9e38 Guido Trotter
    os_obj = rpc.call_os_get(pnode.name, self.op.os_type)
3139 dfa96ded Guido Trotter
    if not os_obj:
3140 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("OS '%s' not in supported os list for"
3141 3ecf6786 Iustin Pop
                                 " primary node"  % self.op.os_type)
3142 a8083063 Iustin Pop
3143 3b6d8c9b Iustin Pop
    if self.op.kernel_path == constants.VALUE_NONE:
3144 3b6d8c9b Iustin Pop
      raise errors.OpPrereqError("Can't set instance kernel to none")
3145 3b6d8c9b Iustin Pop
3146 a8083063 Iustin Pop
3147 901a65c1 Iustin Pop
    # bridge check on primary node
3148 a8083063 Iustin Pop
    if not rpc.call_bridges_exist(self.pnode.name, [self.op.bridge]):
3149 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("target bridge '%s' does not exist on"
3150 3ecf6786 Iustin Pop
                                 " destination node '%s'" %
3151 3ecf6786 Iustin Pop
                                 (self.op.bridge, pnode.name))
3152 a8083063 Iustin Pop
3153 49ce1563 Iustin Pop
    # memory check on primary node
3154 49ce1563 Iustin Pop
    if self.op.start:
3155 49ce1563 Iustin Pop
      _CheckNodeFreeMemory(self.cfg, self.pnode.name,
3156 49ce1563 Iustin Pop
                           "creating instance %s" % self.op.instance_name,
3157 49ce1563 Iustin Pop
                           self.op.mem_size)
3158 49ce1563 Iustin Pop
3159 31a853d2 Iustin Pop
    # hvm_cdrom_image_path verification
3160 31a853d2 Iustin Pop
    if self.op.hvm_cdrom_image_path is not None:
3161 31a853d2 Iustin Pop
      if not os.path.isabs(self.op.hvm_cdrom_image_path):
3162 31a853d2 Iustin Pop
        raise errors.OpPrereqError("The path to the HVM CDROM image must"
3163 31a853d2 Iustin Pop
                                   " be an absolute path or None, not %s" %
3164 31a853d2 Iustin Pop
                                   self.op.hvm_cdrom_image_path)
3165 31a853d2 Iustin Pop
      if not os.path.isfile(self.op.hvm_cdrom_image_path):
3166 31a853d2 Iustin Pop
        raise errors.OpPrereqError("The HVM CDROM image must either be a"
3167 31a853d2 Iustin Pop
                                   " regular file or a symlink pointing to"
3168 31a853d2 Iustin Pop
                                   " an existing regular file, not %s" %
3169 31a853d2 Iustin Pop
                                   self.op.hvm_cdrom_image_path)
3170 31a853d2 Iustin Pop
3171 31a853d2 Iustin Pop
    # vnc_bind_address verification
3172 31a853d2 Iustin Pop
    if self.op.vnc_bind_address is not None:
3173 31a853d2 Iustin Pop
      if not utils.IsValidIP(self.op.vnc_bind_address):
3174 31a853d2 Iustin Pop
        raise errors.OpPrereqError("given VNC bind address '%s' doesn't look"
3175 31a853d2 Iustin Pop
                                   " like a valid IP address" %
3176 31a853d2 Iustin Pop
                                   self.op.vnc_bind_address)
3177 31a853d2 Iustin Pop
3178 a8083063 Iustin Pop
    if self.op.start:
3179 a8083063 Iustin Pop
      self.instance_status = 'up'
3180 a8083063 Iustin Pop
    else:
3181 a8083063 Iustin Pop
      self.instance_status = 'down'
3182 a8083063 Iustin Pop
3183 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
3184 a8083063 Iustin Pop
    """Create and add the instance to the cluster.
3185 a8083063 Iustin Pop

3186 a8083063 Iustin Pop
    """
3187 a8083063 Iustin Pop
    instance = self.op.instance_name
3188 a8083063 Iustin Pop
    pnode_name = self.pnode.name
3189 a8083063 Iustin Pop
3190 1862d460 Alexander Schreiber
    if self.op.mac == "auto":
3191 ba4b62cf Iustin Pop
      mac_address = self.cfg.GenerateMAC()
3192 1862d460 Alexander Schreiber
    else:
3193 ba4b62cf Iustin Pop
      mac_address = self.op.mac
3194 1862d460 Alexander Schreiber
3195 1862d460 Alexander Schreiber
    nic = objects.NIC(bridge=self.op.bridge, mac=mac_address)
3196 a8083063 Iustin Pop
    if self.inst_ip is not None:
3197 a8083063 Iustin Pop
      nic.ip = self.inst_ip
3198 a8083063 Iustin Pop
3199 2a6469d5 Alexander Schreiber
    ht_kind = self.sstore.GetHypervisorType()
3200 2a6469d5 Alexander Schreiber
    if ht_kind in constants.HTS_REQ_PORT:
3201 2a6469d5 Alexander Schreiber
      network_port = self.cfg.AllocatePort()
3202 2a6469d5 Alexander Schreiber
    else:
3203 2a6469d5 Alexander Schreiber
      network_port = None
3204 58acb49d Alexander Schreiber
3205 31a853d2 Iustin Pop
    if self.op.vnc_bind_address is None:
3206 31a853d2 Iustin Pop
      self.op.vnc_bind_address = constants.VNC_DEFAULT_BIND_ADDRESS
3207 31a853d2 Iustin Pop
3208 2c313123 Manuel Franceschini
    # this is needed because os.path.join does not accept None arguments
3209 2c313123 Manuel Franceschini
    if self.op.file_storage_dir is None:
3210 2c313123 Manuel Franceschini
      string_file_storage_dir = ""
3211 2c313123 Manuel Franceschini
    else:
3212 2c313123 Manuel Franceschini
      string_file_storage_dir = self.op.file_storage_dir
3213 2c313123 Manuel Franceschini
3214 0f1a06e3 Manuel Franceschini
    # build the full file storage dir path
3215 0f1a06e3 Manuel Franceschini
    file_storage_dir = os.path.normpath(os.path.join(
3216 0f1a06e3 Manuel Franceschini
                                        self.sstore.GetFileStorageDir(),
3217 2c313123 Manuel Franceschini
                                        string_file_storage_dir, instance))
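    # e.g. assuming the cluster file storage dir is /srv/ganeti/file-storage
    # (an assumed example value; the real one comes from the ssconf) and no
    # extra subdirectory was given, this yields
    # /srv/ganeti/file-storage/<instance name>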
3218 0f1a06e3 Manuel Franceschini
3219 0f1a06e3 Manuel Franceschini
3220 923b1523 Iustin Pop
    disks = _GenerateDiskTemplate(self.cfg,
3221 a8083063 Iustin Pop
                                  self.op.disk_template,
3222 a8083063 Iustin Pop
                                  instance, pnode_name,
3223 a8083063 Iustin Pop
                                  self.secondaries, self.op.disk_size,
3224 0f1a06e3 Manuel Franceschini
                                  self.op.swap_size,
3225 0f1a06e3 Manuel Franceschini
                                  file_storage_dir,
3226 0f1a06e3 Manuel Franceschini
                                  self.op.file_driver)
3227 a8083063 Iustin Pop
3228 a8083063 Iustin Pop
    iobj = objects.Instance(name=instance, os=self.op.os_type,
3229 a8083063 Iustin Pop
                            primary_node=pnode_name,
3230 a8083063 Iustin Pop
                            memory=self.op.mem_size,
3231 a8083063 Iustin Pop
                            vcpus=self.op.vcpus,
3232 a8083063 Iustin Pop
                            nics=[nic], disks=disks,
3233 a8083063 Iustin Pop
                            disk_template=self.op.disk_template,
3234 a8083063 Iustin Pop
                            status=self.instance_status,
3235 58acb49d Alexander Schreiber
                            network_port=network_port,
3236 3b6d8c9b Iustin Pop
                            kernel_path=self.op.kernel_path,
3237 3b6d8c9b Iustin Pop
                            initrd_path=self.op.initrd_path,
3238 25c5878d Alexander Schreiber
                            hvm_boot_order=self.op.hvm_boot_order,
3239 31a853d2 Iustin Pop
                            hvm_acpi=self.op.hvm_acpi,
3240 31a853d2 Iustin Pop
                            hvm_pae=self.op.hvm_pae,
3241 31a853d2 Iustin Pop
                            hvm_cdrom_image_path=self.op.hvm_cdrom_image_path,
3242 31a853d2 Iustin Pop
                            vnc_bind_address=self.op.vnc_bind_address,
3243 a8083063 Iustin Pop
                            )
3244 a8083063 Iustin Pop
3245 a8083063 Iustin Pop
    feedback_fn("* creating instance disks...")
3246 a8083063 Iustin Pop
    if not _CreateDisks(self.cfg, iobj):
3247 a8083063 Iustin Pop
      _RemoveDisks(iobj, self.cfg)
3248 3ecf6786 Iustin Pop
      raise errors.OpExecError("Device creation failed, reverting...")
3249 a8083063 Iustin Pop
3250 a8083063 Iustin Pop
    feedback_fn("adding instance %s to cluster config" % instance)
3251 a8083063 Iustin Pop
3252 a8083063 Iustin Pop
    self.cfg.AddInstance(iobj)
3253 a8083063 Iustin Pop
3254 a8083063 Iustin Pop
    if self.op.wait_for_sync:
3255 5bfac263 Iustin Pop
      disk_abort = not _WaitForSync(self.cfg, iobj, self.proc)
3256 a1f445d3 Iustin Pop
    elif iobj.disk_template in constants.DTS_NET_MIRROR:
3257 a8083063 Iustin Pop
      # make sure the disks are not degraded (still sync-ing is ok)
3258 a8083063 Iustin Pop
      time.sleep(15)
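      # the sleep gives the newly created mirrors some time to connect and
      # start syncing before the single (oneshot) status check below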
3259 a8083063 Iustin Pop
      feedback_fn("* checking mirrors status")
3260 5bfac263 Iustin Pop
      disk_abort = not _WaitForSync(self.cfg, iobj, self.proc, oneshot=True)
3261 a8083063 Iustin Pop
    else:
3262 a8083063 Iustin Pop
      disk_abort = False
3263 a8083063 Iustin Pop
3264 a8083063 Iustin Pop
    if disk_abort:
3265 a8083063 Iustin Pop
      _RemoveDisks(iobj, self.cfg)
3266 a8083063 Iustin Pop
      self.cfg.RemoveInstance(iobj.name)
3267 3ecf6786 Iustin Pop
      raise errors.OpExecError("There are some degraded disks for"
3268 3ecf6786 Iustin Pop
                               " this instance")
3269 a8083063 Iustin Pop
3270 a8083063 Iustin Pop
    feedback_fn("creating os for instance %s on node %s" %
3271 a8083063 Iustin Pop
                (instance, pnode_name))
3272 a8083063 Iustin Pop
3273 a8083063 Iustin Pop
    if iobj.disk_template != constants.DT_DISKLESS:
3274 a8083063 Iustin Pop
      if self.op.mode == constants.INSTANCE_CREATE:
3275 a8083063 Iustin Pop
        feedback_fn("* running the instance OS create scripts...")
3276 a8083063 Iustin Pop
        if not rpc.call_instance_os_add(pnode_name, iobj, "sda", "sdb"):
3277 3ecf6786 Iustin Pop
          raise errors.OpExecError("could not add os for instance %s"
3278 3ecf6786 Iustin Pop
                                   " on node %s" %
3279 3ecf6786 Iustin Pop
                                   (instance, pnode_name))
3280 a8083063 Iustin Pop
3281 a8083063 Iustin Pop
      elif self.op.mode == constants.INSTANCE_IMPORT:
3282 a8083063 Iustin Pop
        feedback_fn("* running the instance OS import scripts...")
3283 a8083063 Iustin Pop
        src_node = self.op.src_node
3284 a8083063 Iustin Pop
        src_image = self.src_image
3285 a8083063 Iustin Pop
        if not rpc.call_instance_os_import(pnode_name, iobj, "sda", "sdb",
3286 a8083063 Iustin Pop
                                                src_node, src_image):
3287 3ecf6786 Iustin Pop
          raise errors.OpExecError("Could not import os for instance"
3288 3ecf6786 Iustin Pop
                                   " %s on node %s" %
3289 3ecf6786 Iustin Pop
                                   (instance, pnode_name))
3290 a8083063 Iustin Pop
      else:
3291 a8083063 Iustin Pop
        # also checked in the prereq part
3292 3ecf6786 Iustin Pop
        raise errors.ProgrammerError("Unknown OS initialization mode '%s'"
3293 3ecf6786 Iustin Pop
                                     % self.op.mode)
3294 a8083063 Iustin Pop
3295 a8083063 Iustin Pop
    if self.op.start:
3296 a8083063 Iustin Pop
      logger.Info("starting instance %s on node %s" % (instance, pnode_name))
3297 a8083063 Iustin Pop
      feedback_fn("* starting instance...")
3298 a8083063 Iustin Pop
      if not rpc.call_instance_start(pnode_name, iobj, None):
3299 3ecf6786 Iustin Pop
        raise errors.OpExecError("Could not start instance")
3300 a8083063 Iustin Pop
3301 a8083063 Iustin Pop
3302 a8083063 Iustin Pop
class LUConnectConsole(NoHooksLU):
3303 a8083063 Iustin Pop
  """Connect to an instance's console.
3304 a8083063 Iustin Pop

3305 a8083063 Iustin Pop
  This is somewhat special in that it returns the command line that
3306 a8083063 Iustin Pop
  you need to run on the master node in order to connect to the
3307 a8083063 Iustin Pop
  console.
3308 a8083063 Iustin Pop

3309 a8083063 Iustin Pop
  """
3310 a8083063 Iustin Pop
  _OP_REQP = ["instance_name"]
3311 a8083063 Iustin Pop
3312 a8083063 Iustin Pop
  def CheckPrereq(self):
3313 a8083063 Iustin Pop
    """Check prerequisites.
3314 a8083063 Iustin Pop

3315 a8083063 Iustin Pop
    This checks that the instance is in the cluster.
3316 a8083063 Iustin Pop

3317 a8083063 Iustin Pop
    """
3318 a8083063 Iustin Pop
    instance = self.cfg.GetInstanceInfo(
3319 a8083063 Iustin Pop
      self.cfg.ExpandInstanceName(self.op.instance_name))
3320 a8083063 Iustin Pop
    if instance is None:
3321 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("Instance '%s' not known" %
3322 3ecf6786 Iustin Pop
                                 self.op.instance_name)
3323 a8083063 Iustin Pop
    self.instance = instance
3324 a8083063 Iustin Pop
3325 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
3326 a8083063 Iustin Pop
    """Connect to the console of an instance
3327 a8083063 Iustin Pop

3328 a8083063 Iustin Pop
    """
3329 a8083063 Iustin Pop
    instance = self.instance
3330 a8083063 Iustin Pop
    node = instance.primary_node
3331 a8083063 Iustin Pop
3332 a8083063 Iustin Pop
    node_insts = rpc.call_instance_list([node])[node]
3333 a8083063 Iustin Pop
    if node_insts is False:
3334 3ecf6786 Iustin Pop
      raise errors.OpExecError("Can't connect to node %s." % node)
3335 a8083063 Iustin Pop
3336 a8083063 Iustin Pop
    if instance.name not in node_insts:
3337 3ecf6786 Iustin Pop
      raise errors.OpExecError("Instance %s is not running." % instance.name)
3338 a8083063 Iustin Pop
3339 a8083063 Iustin Pop
    logger.Debug("connecting to console of %s on %s" % (instance.name, node))
3340 a8083063 Iustin Pop
3341 a8083063 Iustin Pop
    hyper = hypervisor.GetHypervisor()
3342 30989e69 Alexander Schreiber
    console_cmd = hyper.GetShellCommandForConsole(instance)
3343 b047857b Michael Hanselmann
3344 82122173 Iustin Pop
    # build ssh cmdline
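    # note that the command is only built and returned; the caller (normally
    # the command-line client on the master node) is expected to execute it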
3345 0a80a26f Michael Hanselmann
    return self.ssh.BuildCmd(node, "root", console_cmd, batch=True, tty=True)
3346 a8083063 Iustin Pop
3347 a8083063 Iustin Pop
3348 a8083063 Iustin Pop
class LUReplaceDisks(LogicalUnit):
3349 a8083063 Iustin Pop
  """Replace the disks of an instance.
3350 a8083063 Iustin Pop

3351 a8083063 Iustin Pop
  """
3352 a8083063 Iustin Pop
  HPATH = "mirrors-replace"
3353 a8083063 Iustin Pop
  HTYPE = constants.HTYPE_INSTANCE
3354 a9e0c397 Iustin Pop
  _OP_REQP = ["instance_name", "mode", "disks"]
3355 a8083063 Iustin Pop
3356 b6e82a65 Iustin Pop
  def _RunAllocator(self):
3357 b6e82a65 Iustin Pop
    """Compute a new secondary node using an IAllocator.
3358 b6e82a65 Iustin Pop

3359 b6e82a65 Iustin Pop
    """
3360 b6e82a65 Iustin Pop
    ial = IAllocator(self.cfg, self.sstore,
3361 b6e82a65 Iustin Pop
                     mode=constants.IALLOCATOR_MODE_RELOC,
3362 b6e82a65 Iustin Pop
                     name=self.op.instance_name,
3363 b6e82a65 Iustin Pop
                     relocate_from=[self.sec_node])
3364 b6e82a65 Iustin Pop
3365 b6e82a65 Iustin Pop
    ial.Run(self.op.iallocator)
3366 b6e82a65 Iustin Pop
3367 b6e82a65 Iustin Pop
    if not ial.success:
3368 b6e82a65 Iustin Pop
      raise errors.OpPrereqError("Can't compute nodes using"
3369 b6e82a65 Iustin Pop
                                 " iallocator '%s': %s" % (self.op.iallocator,
3370 b6e82a65 Iustin Pop
                                                           ial.info))
3371 b6e82a65 Iustin Pop
    if len(ial.nodes) != ial.required_nodes:
3372 b6e82a65 Iustin Pop
      raise errors.OpPrereqError("iallocator '%s' returned invalid number"
3373 b6e82a65 Iustin Pop
                                 " of nodes (%s), required %s" %
3374 b6e82a65 Iustin Pop
                                 (self.op.iallocator, len(ial.nodes),
                                  ial.required_nodes))
3375 b6e82a65 Iustin Pop
    self.op.remote_node = ial.nodes[0]
3376 b6e82a65 Iustin Pop
    logger.ToStdout("Selected new secondary for the instance: %s" %
3377 b6e82a65 Iustin Pop
                    self.op.remote_node)
3378 b6e82a65 Iustin Pop
3379 a8083063 Iustin Pop
  def BuildHooksEnv(self):
3380 a8083063 Iustin Pop
    """Build hooks env.
3381 a8083063 Iustin Pop

3382 a8083063 Iustin Pop
    This runs on the master, the primary and all the secondaries.
3383 a8083063 Iustin Pop

3384 a8083063 Iustin Pop
    """
3385 a8083063 Iustin Pop
    env = {
3386 a9e0c397 Iustin Pop
      "MODE": self.op.mode,
3387 a8083063 Iustin Pop
      "NEW_SECONDARY": self.op.remote_node,
3388 a8083063 Iustin Pop
      "OLD_SECONDARY": self.instance.secondary_nodes[0],
3389 a8083063 Iustin Pop
      }
3390 396e1b78 Michael Hanselmann
    env.update(_BuildInstanceHookEnvByObject(self.instance))
3391 0834c866 Iustin Pop
    nl = [
3392 0834c866 Iustin Pop
      self.sstore.GetMasterNode(),
3393 0834c866 Iustin Pop
      self.instance.primary_node,
3394 0834c866 Iustin Pop
      ]
3395 0834c866 Iustin Pop
    if self.op.remote_node is not None:
3396 0834c866 Iustin Pop
      nl.append(self.op.remote_node)
3397 a8083063 Iustin Pop
    return env, nl, nl
3398 a8083063 Iustin Pop
3399 a8083063 Iustin Pop
  def CheckPrereq(self):
3400 a8083063 Iustin Pop
    """Check prerequisites.
3401 a8083063 Iustin Pop

3402 a8083063 Iustin Pop
    This checks that the instance is in the cluster.
3403 a8083063 Iustin Pop

3404 a8083063 Iustin Pop
    """
3405 b6e82a65 Iustin Pop
    if not hasattr(self.op, "remote_node"):
3406 b6e82a65 Iustin Pop
      self.op.remote_node = None
3407 b6e82a65 Iustin Pop
3408 a8083063 Iustin Pop
    instance = self.cfg.GetInstanceInfo(
3409 a8083063 Iustin Pop
      self.cfg.ExpandInstanceName(self.op.instance_name))
3410 a8083063 Iustin Pop
    if instance is None:
3411 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("Instance '%s' not known" %
3412 3ecf6786 Iustin Pop
                                 self.op.instance_name)
3413 a8083063 Iustin Pop
    self.instance = instance
3414 7df43a76 Iustin Pop
    self.op.instance_name = instance.name
3415 a8083063 Iustin Pop
3416 a9e0c397 Iustin Pop
    if instance.disk_template not in constants.DTS_NET_MIRROR:
3417 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("Instance's disk layout is not"
3418 a9e0c397 Iustin Pop
                                 " network mirrored.")
3419 a8083063 Iustin Pop
3420 a8083063 Iustin Pop
    if len(instance.secondary_nodes) != 1:
3421 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("The instance has a strange layout,"
3422 3ecf6786 Iustin Pop
                                 " expected one secondary but found %d" %
3423 3ecf6786 Iustin Pop
                                 len(instance.secondary_nodes))
3424 a8083063 Iustin Pop
3425 a9e0c397 Iustin Pop
    self.sec_node = instance.secondary_nodes[0]
3426 a9e0c397 Iustin Pop
3427 b6e82a65 Iustin Pop
    ia_name = getattr(self.op, "iallocator", None)
3428 b6e82a65 Iustin Pop
    if ia_name is not None:
3429 b6e82a65 Iustin Pop
      if self.op.remote_node is not None:
3430 b6e82a65 Iustin Pop
        raise errors.OpPrereqError("Give either the iallocator or the new"
3431 b6e82a65 Iustin Pop
                                   " secondary, not both")
3432 b6e82a65 Iustin Pop
      self.op.remote_node = self._RunAllocator()
3433 b6e82a65 Iustin Pop
3434 b6e82a65 Iustin Pop
    remote_node = self.op.remote_node
3435 a9e0c397 Iustin Pop
    if remote_node is not None:
3436 a8083063 Iustin Pop
      remote_node = self.cfg.ExpandNodeName(remote_node)
3437 a8083063 Iustin Pop
      if remote_node is None:
3438 3ecf6786 Iustin Pop
        raise errors.OpPrereqError("Node '%s' not known" %
3439 3ecf6786 Iustin Pop
                                   self.op.remote_node)
3440 a9e0c397 Iustin Pop
      self.remote_node_info = self.cfg.GetNodeInfo(remote_node)
3441 a9e0c397 Iustin Pop
    else:
3442 a9e0c397 Iustin Pop
      self.remote_node_info = None
3443 a8083063 Iustin Pop
    if remote_node == instance.primary_node:
3444 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("The specified node is the primary node of"
3445 3ecf6786 Iustin Pop
                                 " the instance.")
3446 a9e0c397 Iustin Pop
    elif remote_node == self.sec_node:
3447 0834c866 Iustin Pop
      if self.op.mode == constants.REPLACE_DISK_SEC:
3448 0834c866 Iustin Pop
        # this is for DRBD8, where we can't execute the same mode of
3449 0834c866 Iustin Pop
        # replacement as for drbd7 (no different port allocated)
3450 0834c866 Iustin Pop
        raise errors.OpPrereqError("Same secondary given, cannot execute"
3451 0834c866 Iustin Pop
                                   " replacement")
3452 a9e0c397 Iustin Pop
    if instance.disk_template == constants.DT_DRBD8:
3453 7df43a76 Iustin Pop
      if (self.op.mode == constants.REPLACE_DISK_ALL and
3454 7df43a76 Iustin Pop
          remote_node is not None):
3455 7df43a76 Iustin Pop
        # switch to replace secondary mode
3456 7df43a76 Iustin Pop
        self.op.mode = constants.REPLACE_DISK_SEC
3457 7df43a76 Iustin Pop
3458 a9e0c397 Iustin Pop
      if self.op.mode == constants.REPLACE_DISK_ALL:
3459 12c3449a Michael Hanselmann
        raise errors.OpPrereqError("Template 'drbd' only allows primary or"
3460 a9e0c397 Iustin Pop
                                   " secondary disk replacement, not"
3461 a9e0c397 Iustin Pop
                                   " both at once")
3462 a9e0c397 Iustin Pop
      elif self.op.mode == constants.REPLACE_DISK_PRI:
3463 a9e0c397 Iustin Pop
        if remote_node is not None:
3464 12c3449a Michael Hanselmann
          raise errors.OpPrereqError("Template 'drbd' does not allow changing"
3465 a9e0c397 Iustin Pop
                                     " the secondary while doing a primary"
3466 a9e0c397 Iustin Pop
                                     " node disk replacement")
3467 a9e0c397 Iustin Pop
        self.tgt_node = instance.primary_node
3468 cff90b79 Iustin Pop
        self.oth_node = instance.secondary_nodes[0]
3469 a9e0c397 Iustin Pop
      elif self.op.mode == constants.REPLACE_DISK_SEC:
3470 a9e0c397 Iustin Pop
        self.new_node = remote_node # this can be None, in which case
3471 a9e0c397 Iustin Pop
                                    # we don't change the secondary
3472 a9e0c397 Iustin Pop
        self.tgt_node = instance.secondary_nodes[0]
3473 cff90b79 Iustin Pop
        self.oth_node = instance.primary_node
3474 a9e0c397 Iustin Pop
      else:
3475 a9e0c397 Iustin Pop
        raise errors.ProgrammerError("Unhandled disk replace mode")
3476 a9e0c397 Iustin Pop
3477 a9e0c397 Iustin Pop
    for name in self.op.disks:
3478 a9e0c397 Iustin Pop
      if instance.FindDisk(name) is None:
3479 a9e0c397 Iustin Pop
        raise errors.OpPrereqError("Disk '%s' not found for instance '%s'" %
3480 a9e0c397 Iustin Pop
                                   (name, instance.name))
3481 a8083063 Iustin Pop
    self.op.remote_node = remote_node
3482 a8083063 Iustin Pop
3483 a9e0c397 Iustin Pop
  def _ExecD8DiskOnly(self, feedback_fn):
3484 a9e0c397 Iustin Pop
    """Replace a disk on the primary or secondary for dbrd8.
3485 a9e0c397 Iustin Pop

3486 a9e0c397 Iustin Pop
    The algorithm for replace is quite complicated:
3487 a9e0c397 Iustin Pop
      - for each disk to be replaced:
3488 a9e0c397 Iustin Pop
        - create new LVs on the target node with unique names
3489 a9e0c397 Iustin Pop
        - detach old LVs from the drbd device
3490 a9e0c397 Iustin Pop
        - rename old LVs to name_replaced.<time_t>
3491 a9e0c397 Iustin Pop
        - rename new LVs to old LVs
3492 a9e0c397 Iustin Pop
        - attach the new LVs (with the old names now) to the drbd device
3493 a9e0c397 Iustin Pop
      - wait for sync across all devices
3494 a9e0c397 Iustin Pop
      - for each modified disk:
3495 a9e0c397 Iustin Pop
        - remove old LVs (which have the name name_replaced.<time_t>)
3496 a9e0c397 Iustin Pop

3497 a9e0c397 Iustin Pop
    Failures are not very well handled.
3498 cff90b79 Iustin Pop

3499 a9e0c397 Iustin Pop
    """
3500 cff90b79 Iustin Pop
    steps_total = 6
3501 5bfac263 Iustin Pop
    warning, info = (self.proc.LogWarning, self.proc.LogInfo)
3502 a9e0c397 Iustin Pop
    instance = self.instance
3503 a9e0c397 Iustin Pop
    iv_names = {}
3504 a9e0c397 Iustin Pop
    vgname = self.cfg.GetVGName()
3505 a9e0c397 Iustin Pop
    # start of work
3506 a9e0c397 Iustin Pop
    cfg = self.cfg
3507 a9e0c397 Iustin Pop
    tgt_node = self.tgt_node
3508 cff90b79 Iustin Pop
    oth_node = self.oth_node
3509 cff90b79 Iustin Pop
3510 cff90b79 Iustin Pop
    # Step: check device activation
3511 5bfac263 Iustin Pop
    self.proc.LogStep(1, steps_total, "check device existence")
3512 cff90b79 Iustin Pop
    info("checking volume groups")
3513 cff90b79 Iustin Pop
    my_vg = cfg.GetVGName()
3514 cff90b79 Iustin Pop
    results = rpc.call_vg_list([oth_node, tgt_node])
3515 cff90b79 Iustin Pop
    if not results:
3516 cff90b79 Iustin Pop
      raise errors.OpExecError("Can't list volume groups on the nodes")
3517 cff90b79 Iustin Pop
    for node in oth_node, tgt_node:
3518 cff90b79 Iustin Pop
      res = results.get(node, False)
3519 cff90b79 Iustin Pop
      if not res or my_vg not in res:
3520 cff90b79 Iustin Pop
        raise errors.OpExecError("Volume group '%s' not found on %s" %
3521 cff90b79 Iustin Pop
                                 (my_vg, node))
3522 cff90b79 Iustin Pop
    for dev in instance.disks:
3523 cff90b79 Iustin Pop
      if not dev.iv_name in self.op.disks:
3524 cff90b79 Iustin Pop
        continue
3525 cff90b79 Iustin Pop
      for node in tgt_node, oth_node:
3526 cff90b79 Iustin Pop
        info("checking %s on %s" % (dev.iv_name, node))
3527 cff90b79 Iustin Pop
        cfg.SetDiskID(dev, node)
3528 cff90b79 Iustin Pop
        if not rpc.call_blockdev_find(node, dev):
3529 cff90b79 Iustin Pop
          raise errors.OpExecError("Can't find device %s on node %s" %
3530 cff90b79 Iustin Pop
                                   (dev.iv_name, node))
3531 cff90b79 Iustin Pop
3532 cff90b79 Iustin Pop
    # Step: check other node consistency
3533 5bfac263 Iustin Pop
    self.proc.LogStep(2, steps_total, "check peer consistency")
3534 cff90b79 Iustin Pop
    for dev in instance.disks:
3535 cff90b79 Iustin Pop
      if not dev.iv_name in self.op.disks:
3536 cff90b79 Iustin Pop
        continue
3537 cff90b79 Iustin Pop
      info("checking %s consistency on %s" % (dev.iv_name, oth_node))
3538 cff90b79 Iustin Pop
      if not _CheckDiskConsistency(self.cfg, dev, oth_node,
3539 cff90b79 Iustin Pop
                                   oth_node==instance.primary_node):
3540 cff90b79 Iustin Pop
        raise errors.OpExecError("Peer node (%s) has degraded storage, unsafe"
3541 cff90b79 Iustin Pop
                                 " to replace disks on this node (%s)" %
3542 cff90b79 Iustin Pop
                                 (oth_node, tgt_node))
3543 cff90b79 Iustin Pop
3544 cff90b79 Iustin Pop
    # Step: create new storage
3545 5bfac263 Iustin Pop
    self.proc.LogStep(3, steps_total, "allocate new storage")
3546 a9e0c397 Iustin Pop
    for dev in instance.disks:
3547 a9e0c397 Iustin Pop
      if not dev.iv_name in self.op.disks:
3548 a9e0c397 Iustin Pop
        continue
3549 a9e0c397 Iustin Pop
      size = dev.size
3550 a9e0c397 Iustin Pop
      cfg.SetDiskID(dev, tgt_node)
3551 a9e0c397 Iustin Pop
      lv_names = [".%s_%s" % (dev.iv_name, suf) for suf in ["data", "meta"]]
3552 a9e0c397 Iustin Pop
      names = _GenerateUniqueNames(cfg, lv_names)
3553 a9e0c397 Iustin Pop
      lv_data = objects.Disk(dev_type=constants.LD_LV, size=size,
3554 a9e0c397 Iustin Pop
                             logical_id=(vgname, names[0]))
3555 a9e0c397 Iustin Pop
      lv_meta = objects.Disk(dev_type=constants.LD_LV, size=128,
3556 a9e0c397 Iustin Pop
                             logical_id=(vgname, names[1]))
3557 a9e0c397 Iustin Pop
      new_lvs = [lv_data, lv_meta]
3558 a9e0c397 Iustin Pop
      old_lvs = dev.children
3559 a9e0c397 Iustin Pop
      iv_names[dev.iv_name] = (dev, old_lvs, new_lvs)
3560 cff90b79 Iustin Pop
      info("creating new local storage on %s for %s" %
3561 cff90b79 Iustin Pop
           (tgt_node, dev.iv_name))
3562 a9e0c397 Iustin Pop
      # since we *always* want to create this LV, we use the
3563 a9e0c397 Iustin Pop
      # _Create...OnPrimary (which forces the creation), even if we
3564 a9e0c397 Iustin Pop
      # are talking about the secondary node
3565 a9e0c397 Iustin Pop
      for new_lv in new_lvs:
3566 3f78eef2 Iustin Pop
        if not _CreateBlockDevOnPrimary(cfg, tgt_node, instance, new_lv,
3567 a9e0c397 Iustin Pop
                                        _GetInstanceInfoText(instance)):
3568 a9e0c397 Iustin Pop
          raise errors.OpExecError("Failed to create new LV named '%s' on"
3569 a9e0c397 Iustin Pop
                                   " node '%s'" %
3570 a9e0c397 Iustin Pop
                                   (new_lv.logical_id[1], tgt_node))
3571 a9e0c397 Iustin Pop
3572 cff90b79 Iustin Pop
    # Step: for each lv, detach+rename*2+attach
3573 5bfac263 Iustin Pop
    self.proc.LogStep(4, steps_total, "change drbd configuration")
3574 cff90b79 Iustin Pop
    for dev, old_lvs, new_lvs in iv_names.itervalues():
3575 cff90b79 Iustin Pop
      info("detaching %s drbd from local storage" % dev.iv_name)
3576 a9e0c397 Iustin Pop
      if not rpc.call_blockdev_removechildren(tgt_node, dev, old_lvs):
3577 a9e0c397 Iustin Pop
        raise errors.OpExecError("Can't detach drbd from local storage on node"
3578 a9e0c397 Iustin Pop
                                 " %s for device %s" % (tgt_node, dev.iv_name))
3579 cff90b79 Iustin Pop
      #dev.children = []
3580 cff90b79 Iustin Pop
      #cfg.Update(instance)
3581 a9e0c397 Iustin Pop
3582 a9e0c397 Iustin Pop
      # ok, we created the new LVs, so now we know we have the needed
3583 a9e0c397 Iustin Pop
      # storage; as such, we proceed on the target node to rename
3584 a9e0c397 Iustin Pop
      # old_lv to _old, and new_lv to old_lv; note that we rename LVs
3585 c99a3cc0 Manuel Franceschini
      # using the assumption that logical_id == physical_id (which in
3586 a9e0c397 Iustin Pop
      # turn is the unique_id on that node)
3587 cff90b79 Iustin Pop
3588 cff90b79 Iustin Pop
      # FIXME(iustin): use a better name for the replaced LVs
3589 a9e0c397 Iustin Pop
      temp_suffix = int(time.time())
3590 a9e0c397 Iustin Pop
      ren_fn = lambda d, suff: (d.physical_id[0],
3591 a9e0c397 Iustin Pop
                                d.physical_id[1] + "_replaced-%s" % suff)
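      # e.g. ("xenvg", "abc.sda_data") would be renamed to
      # ("xenvg", "abc.sda_data_replaced-1199145600"); the names and the
      # timestamp here are illustrative only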
3592 cff90b79 Iustin Pop
      # build the rename list based on what LVs exist on the node
3593 cff90b79 Iustin Pop
      rlist = []
3594 cff90b79 Iustin Pop
      for to_ren in old_lvs:
3595 cff90b79 Iustin Pop
        find_res = rpc.call_blockdev_find(tgt_node, to_ren)
3596 cff90b79 Iustin Pop
        if find_res is not None: # device exists
3597 cff90b79 Iustin Pop
          rlist.append((to_ren, ren_fn(to_ren, temp_suffix)))
3598 cff90b79 Iustin Pop
3599 cff90b79 Iustin Pop
      info("renaming the old LVs on the target node")
3600 a9e0c397 Iustin Pop
      if not rpc.call_blockdev_rename(tgt_node, rlist):
3601 cff90b79 Iustin Pop
        raise errors.OpExecError("Can't rename old LVs on node %s" % tgt_node)
3602 a9e0c397 Iustin Pop
      # now we rename the new LVs to the old LVs
3603 cff90b79 Iustin Pop
      info("renaming the new LVs on the target node")
3604 a9e0c397 Iustin Pop
      rlist = [(new, old.physical_id) for old, new in zip(old_lvs, new_lvs)]
3605 a9e0c397 Iustin Pop
      if not rpc.call_blockdev_rename(tgt_node, rlist):
3606 cff90b79 Iustin Pop
        raise errors.OpExecError("Can't rename new LVs on node %s" % tgt_node)
3607 cff90b79 Iustin Pop
3608 cff90b79 Iustin Pop
      for old, new in zip(old_lvs, new_lvs):
3609 cff90b79 Iustin Pop
        new.logical_id = old.logical_id
3610 cff90b79 Iustin Pop
        cfg.SetDiskID(new, tgt_node)
3611 a9e0c397 Iustin Pop
3612 cff90b79 Iustin Pop
      for disk in old_lvs:
3613 cff90b79 Iustin Pop
        disk.logical_id = ren_fn(disk, temp_suffix)
3614 cff90b79 Iustin Pop
        cfg.SetDiskID(disk, tgt_node)
3615 a9e0c397 Iustin Pop
3616 a9e0c397 Iustin Pop
      # now that the new lvs have the old name, we can add them to the device
3617 cff90b79 Iustin Pop
      info("adding new mirror component on %s" % tgt_node)
3618 a9e0c397 Iustin Pop
      if not rpc.call_blockdev_addchildren(tgt_node, dev, new_lvs):
3619 a9e0c397 Iustin Pop
        for new_lv in new_lvs:
3620 a9e0c397 Iustin Pop
          if not rpc.call_blockdev_remove(tgt_node, new_lv):
3621 79caa9ed Guido Trotter
            warning("Can't rollback device %s", hint="manually cleanup unused"
3622 cff90b79 Iustin Pop
                    " logical volumes")
3623 cff90b79 Iustin Pop
        raise errors.OpExecError("Can't add local storage to drbd")
3624 a9e0c397 Iustin Pop
3625 a9e0c397 Iustin Pop
      dev.children = new_lvs
3626 a9e0c397 Iustin Pop
      cfg.Update(instance)
3627 a9e0c397 Iustin Pop
3628 cff90b79 Iustin Pop
    # Step: wait for sync
3629 a9e0c397 Iustin Pop
3630 a9e0c397 Iustin Pop
    # this can fail as the old devices are degraded and _WaitForSync
3631 a9e0c397 Iustin Pop
    # does a combined result over all disks, so we don't check its
3632 a9e0c397 Iustin Pop
    # return value
3633 5bfac263 Iustin Pop
    self.proc.LogStep(5, steps_total, "sync devices")
3634 5bfac263 Iustin Pop
    _WaitForSync(cfg, instance, self.proc, unlock=True)
3635 a9e0c397 Iustin Pop
3636 a9e0c397 Iustin Pop
    # so check manually all the devices
3637 a9e0c397 Iustin Pop
    for name, (dev, old_lvs, new_lvs) in iv_names.iteritems():
3638 a9e0c397 Iustin Pop
      cfg.SetDiskID(dev, instance.primary_node)
3639 a9e0c397 Iustin Pop
      is_degr = rpc.call_blockdev_find(instance.primary_node, dev)[5]
3640 a9e0c397 Iustin Pop
      if is_degr:
3641 a9e0c397 Iustin Pop
        raise errors.OpExecError("DRBD device %s is degraded!" % name)
3642 a9e0c397 Iustin Pop
3643 cff90b79 Iustin Pop
    # Step: remove old storage
3644 5bfac263 Iustin Pop
    self.proc.LogStep(6, steps_total, "removing old storage")
3645 a9e0c397 Iustin Pop
    for name, (dev, old_lvs, new_lvs) in iv_names.iteritems():
3646 cff90b79 Iustin Pop
      info("remove logical volumes for %s" % name)
3647 a9e0c397 Iustin Pop
      for lv in old_lvs:
3648 a9e0c397 Iustin Pop
        cfg.SetDiskID(lv, tgt_node)
3649 a9e0c397 Iustin Pop
        if not rpc.call_blockdev_remove(tgt_node, lv):
3650 79caa9ed Guido Trotter
          warning("Can't remove old LV", hint="manually remove unused LVs")
3651 a9e0c397 Iustin Pop
          continue
3652 a9e0c397 Iustin Pop
3653 a9e0c397 Iustin Pop
  def _ExecD8Secondary(self, feedback_fn):
3654 a9e0c397 Iustin Pop
    """Replace the secondary node for drbd8.
3655 a9e0c397 Iustin Pop

3656 a9e0c397 Iustin Pop
    The algorithm for replace is quite complicated:
3657 a9e0c397 Iustin Pop
      - for all disks of the instance:
3658 a9e0c397 Iustin Pop
        - create new LVs on the new node with same names
3659 a9e0c397 Iustin Pop
        - shutdown the drbd device on the old secondary
3660 a9e0c397 Iustin Pop
        - disconnect the drbd network on the primary
3661 a9e0c397 Iustin Pop
        - create the drbd device on the new secondary
3662 a9e0c397 Iustin Pop
        - network attach the drbd on the primary, using an artifice:
3663 a9e0c397 Iustin Pop
          the drbd code for Attach() will connect to the network if it
3664 a9e0c397 Iustin Pop
          finds a device which is connected to the good local disks but
3665 a9e0c397 Iustin Pop
          not network enabled
3666 a9e0c397 Iustin Pop
      - wait for sync across all devices
3667 a9e0c397 Iustin Pop
      - remove all disks from the old secondary
3668 a9e0c397 Iustin Pop

3669 a9e0c397 Iustin Pop
    Failures are not very well handled.
3670 0834c866 Iustin Pop

3671 a9e0c397 Iustin Pop
    """
3672 0834c866 Iustin Pop
    steps_total = 6
3673 5bfac263 Iustin Pop
    warning, info = (self.proc.LogWarning, self.proc.LogInfo)
3674 a9e0c397 Iustin Pop
    instance = self.instance
3675 a9e0c397 Iustin Pop
    iv_names = {}
3676 a9e0c397 Iustin Pop
    vgname = self.cfg.GetVGName()
3677 a9e0c397 Iustin Pop
    # start of work
3678 a9e0c397 Iustin Pop
    cfg = self.cfg
3679 a9e0c397 Iustin Pop
    old_node = self.tgt_node
3680 a9e0c397 Iustin Pop
    new_node = self.new_node
3681 a9e0c397 Iustin Pop
    pri_node = instance.primary_node
3682 0834c866 Iustin Pop
3683 0834c866 Iustin Pop
    # Step: check device activation
3684 5bfac263 Iustin Pop
    self.proc.LogStep(1, steps_total, "check device existence")
3685 0834c866 Iustin Pop
    info("checking volume groups")
3686 0834c866 Iustin Pop
    my_vg = cfg.GetVGName()
3687 0834c866 Iustin Pop
    results = rpc.call_vg_list([pri_node, new_node])
3688 0834c866 Iustin Pop
    if not results:
3689 0834c866 Iustin Pop
      raise errors.OpExecError("Can't list volume groups on the nodes")
3690 0834c866 Iustin Pop
    for node in pri_node, new_node:
3691 0834c866 Iustin Pop
      res = results.get(node, False)
3692 0834c866 Iustin Pop
      if not res or my_vg not in res:
3693 0834c866 Iustin Pop
        raise errors.OpExecError("Volume group '%s' not found on %s" %
3694 0834c866 Iustin Pop
                                 (my_vg, node))
3695 0834c866 Iustin Pop
    for dev in instance.disks:
3696 0834c866 Iustin Pop
      if not dev.iv_name in self.op.disks:
3697 0834c866 Iustin Pop
        continue
3698 0834c866 Iustin Pop
      info("checking %s on %s" % (dev.iv_name, pri_node))
3699 0834c866 Iustin Pop
      cfg.SetDiskID(dev, pri_node)
3700 0834c866 Iustin Pop
      if not rpc.call_blockdev_find(pri_node, dev):
3701 0834c866 Iustin Pop
        raise errors.OpExecError("Can't find device %s on node %s" %
3702 0834c866 Iustin Pop
                                 (dev.iv_name, pri_node))
3703 0834c866 Iustin Pop
3704 0834c866 Iustin Pop
    # Step: check other node consistency
3705 5bfac263 Iustin Pop
    self.proc.LogStep(2, steps_total, "check peer consistency")
3706 0834c866 Iustin Pop
    for dev in instance.disks:
3707 0834c866 Iustin Pop
      if not dev.iv_name in self.op.disks:
3708 0834c866 Iustin Pop
        continue
3709 0834c866 Iustin Pop
      info("checking %s consistency on %s" % (dev.iv_name, pri_node))
3710 0834c866 Iustin Pop
      if not _CheckDiskConsistency(self.cfg, dev, pri_node, True, ldisk=True):
3711 0834c866 Iustin Pop
        raise errors.OpExecError("Primary node (%s) has degraded storage,"
3712 0834c866 Iustin Pop
                                 " unsafe to replace the secondary" %
3713 0834c866 Iustin Pop
                                 pri_node)
3714 0834c866 Iustin Pop
3715 0834c866 Iustin Pop
    # Step: create new storage
3716 5bfac263 Iustin Pop
    self.proc.LogStep(3, steps_total, "allocate new storage")
3717 a9e0c397 Iustin Pop
    for dev in instance.disks:
3718 a9e0c397 Iustin Pop
      size = dev.size
3719 0834c866 Iustin Pop
      info("adding new local storage on %s for %s" % (new_node, dev.iv_name))
3720 a9e0c397 Iustin Pop
      # since we *always* want to create this LV, we use the
3721 a9e0c397 Iustin Pop
      # _Create...OnPrimary (which forces the creation), even if we
3722 a9e0c397 Iustin Pop
      # are talking about the secondary node
3723 a9e0c397 Iustin Pop
      for new_lv in dev.children:
3724 3f78eef2 Iustin Pop
        if not _CreateBlockDevOnPrimary(cfg, new_node, instance, new_lv,
3725 a9e0c397 Iustin Pop
                                        _GetInstanceInfoText(instance)):
3726 a9e0c397 Iustin Pop
          raise errors.OpExecError("Failed to create new LV named '%s' on"
3727 a9e0c397 Iustin Pop
                                   " node '%s'" %
3728 a9e0c397 Iustin Pop
                                   (new_lv.logical_id[1], new_node))
3729 a9e0c397 Iustin Pop
3730 0834c866 Iustin Pop
      iv_names[dev.iv_name] = (dev, dev.children)
3731 0834c866 Iustin Pop
3732 5bfac263 Iustin Pop
    self.proc.LogStep(4, steps_total, "changing drbd configuration")
3733 0834c866 Iustin Pop
    for dev in instance.disks:
3734 0834c866 Iustin Pop
      size = dev.size
3735 0834c866 Iustin Pop
      info("activating a new drbd on %s for %s" % (new_node, dev.iv_name))
3736 a9e0c397 Iustin Pop
      # create new devices on new_node
3737 a9e0c397 Iustin Pop
      new_drbd = objects.Disk(dev_type=constants.LD_DRBD8,
3738 a9e0c397 Iustin Pop
                              logical_id=(pri_node, new_node,
3739 a9e0c397 Iustin Pop
                                          dev.logical_id[2]),
3740 a9e0c397 Iustin Pop
                              children=dev.children)
3741 3f78eef2 Iustin Pop
      if not _CreateBlockDevOnSecondary(cfg, new_node, instance,
3742 3f78eef2 Iustin Pop
                                        new_drbd, False,
3743 a9e0c397 Iustin Pop
                                      _GetInstanceInfoText(instance)):
3744 a9e0c397 Iustin Pop
        raise errors.OpExecError("Failed to create new DRBD on"
3745 a9e0c397 Iustin Pop
                                 " node '%s'" % new_node)
3746 a9e0c397 Iustin Pop
3747 0834c866 Iustin Pop
    for dev in instance.disks:
3748 a9e0c397 Iustin Pop
      # we have new devices, shutdown the drbd on the old secondary
3749 0834c866 Iustin Pop
      info("shutting down drbd for %s on old node" % dev.iv_name)
3750 a9e0c397 Iustin Pop
      cfg.SetDiskID(dev, old_node)
3751 a9e0c397 Iustin Pop
      if not rpc.call_blockdev_shutdown(old_node, dev):
3752 0834c866 Iustin Pop
        warning("Failed to shutdown drbd for %s on old node" % dev.iv_name,
3753 79caa9ed Guido Trotter
                hint="Please cleanup this device manually as soon as possible")
3754 a9e0c397 Iustin Pop
3755 642445d9 Iustin Pop
    info("detaching primary drbds from the network (=> standalone)")
3756 642445d9 Iustin Pop
    done = 0
3757 642445d9 Iustin Pop
    for dev in instance.disks:
3758 a9e0c397 Iustin Pop
      cfg.SetDiskID(dev, pri_node)
3759 642445d9 Iustin Pop
      # set the physical (unique in bdev terms) id to None, meaning
3760 642445d9 Iustin Pop
      # detach from network
3761 642445d9 Iustin Pop
      dev.physical_id = (None,) * len(dev.physical_id)
3762 642445d9 Iustin Pop
      # and 'find' the device, which will 'fix' it to match the
3763 642445d9 Iustin Pop
      # standalone state
3764 642445d9 Iustin Pop
      if rpc.call_blockdev_find(pri_node, dev):
3765 642445d9 Iustin Pop
        done += 1
3766 642445d9 Iustin Pop
      else:
3767 642445d9 Iustin Pop
        warning("Failed to detach drbd %s from network, unusual case" %
3768 642445d9 Iustin Pop
                dev.iv_name)
3769 642445d9 Iustin Pop
3770 642445d9 Iustin Pop
    if not done:
3771 642445d9 Iustin Pop
      # no detaches succeeded (very unlikely)
3772 642445d9 Iustin Pop
      raise errors.OpExecError("Can't detach at least one DRBD from old node")
3773 642445d9 Iustin Pop
3774 642445d9 Iustin Pop
    # if we managed to detach at least one, we update all the disks of
3775 642445d9 Iustin Pop
    # the instance to point to the new secondary
3776 642445d9 Iustin Pop
    info("updating instance configuration")
3777 642445d9 Iustin Pop
    for dev in instance.disks:
3778 642445d9 Iustin Pop
      dev.logical_id = (pri_node, new_node) + dev.logical_id[2:]
3779 a9e0c397 Iustin Pop
      cfg.SetDiskID(dev, pri_node)
3780 642445d9 Iustin Pop
    cfg.Update(instance)
3781 a9e0c397 Iustin Pop
3782 642445d9 Iustin Pop
    # and now perform the drbd attach
3783 642445d9 Iustin Pop
    info("attaching primary drbds to new secondary (standalone => connected)")
3784 642445d9 Iustin Pop
    failures = []
3785 642445d9 Iustin Pop
    for dev in instance.disks:
3786 642445d9 Iustin Pop
      info("attaching primary drbd for %s to new secondary node" % dev.iv_name)
3787 642445d9 Iustin Pop
      # since the attach is smart, it's enough to 'find' the device;
3788 642445d9 Iustin Pop
      # it will automatically activate the network if the physical_id
3789 642445d9 Iustin Pop
      # is correct
3790 642445d9 Iustin Pop
      cfg.SetDiskID(dev, pri_node)
3791 642445d9 Iustin Pop
      if not rpc.call_blockdev_find(pri_node, dev):
3792 642445d9 Iustin Pop
        warning("can't attach drbd %s to new secondary!" % dev.iv_name,
3793 642445d9 Iustin Pop
                "please do a gnt-instance info to see the status of disks")
3794 a9e0c397 Iustin Pop
3795 a9e0c397 Iustin Pop
    # this can fail as the old devices are degraded and _WaitForSync
3796 a9e0c397 Iustin Pop
    # does a combined result over all disks, so we don't check its
3797 a9e0c397 Iustin Pop
    # return value
3798 5bfac263 Iustin Pop
    self.proc.LogStep(5, steps_total, "sync devices")
3799 5bfac263 Iustin Pop
    _WaitForSync(cfg, instance, self.proc, unlock=True)
3800 a9e0c397 Iustin Pop
3801 a9e0c397 Iustin Pop
    # so check manually all the devices
3802 a9e0c397 Iustin Pop
    for name, (dev, old_lvs) in iv_names.iteritems():
3803 a9e0c397 Iustin Pop
      cfg.SetDiskID(dev, pri_node)
3804 a9e0c397 Iustin Pop
      is_degr = rpc.call_blockdev_find(pri_node, dev)[5]
3805 a9e0c397 Iustin Pop
      if is_degr:
3806 a9e0c397 Iustin Pop
        raise errors.OpExecError("DRBD device %s is degraded!" % name)
3807 a9e0c397 Iustin Pop
3808 5bfac263 Iustin Pop
    self.proc.LogStep(6, steps_total, "removing old storage")
3809 a9e0c397 Iustin Pop
    for name, (dev, old_lvs) in iv_names.iteritems():
3810 0834c866 Iustin Pop
      info("remove logical volumes for %s" % name)
3811 a9e0c397 Iustin Pop
      for lv in old_lvs:
3812 a9e0c397 Iustin Pop
        cfg.SetDiskID(lv, old_node)
3813 a9e0c397 Iustin Pop
        if not rpc.call_blockdev_remove(old_node, lv):
3814 0834c866 Iustin Pop
          warning("Can't remove LV on old secondary",
3815 79caa9ed Guido Trotter
                  hint="Cleanup stale volumes by hand")
3816 a9e0c397 Iustin Pop
3817 a9e0c397 Iustin Pop
  def Exec(self, feedback_fn):
3818 a9e0c397 Iustin Pop
    """Execute disk replacement.
3819 a9e0c397 Iustin Pop

3820 a9e0c397 Iustin Pop
    This dispatches the disk replacement to the appropriate handler.
3821 a9e0c397 Iustin Pop

3822 a9e0c397 Iustin Pop
    """
3823 a9e0c397 Iustin Pop
    instance = self.instance
3824 22985314 Guido Trotter
3825 22985314 Guido Trotter
    # Activate the instance disks if we're replacing them on a down instance
3826 22985314 Guido Trotter
    if instance.status == "down":
3827 22985314 Guido Trotter
      op = opcodes.OpActivateInstanceDisks(instance_name=instance.name)
3828 22985314 Guido Trotter
      self.proc.ChainOpCode(op)
3829 22985314 Guido Trotter
3830 abdf0113 Iustin Pop
    if instance.disk_template == constants.DT_DRBD8:
3831 a9e0c397 Iustin Pop
      if self.op.remote_node is None:
3832 a9e0c397 Iustin Pop
        fn = self._ExecD8DiskOnly
3833 a9e0c397 Iustin Pop
      else:
3834 a9e0c397 Iustin Pop
        fn = self._ExecD8Secondary
3835 a9e0c397 Iustin Pop
    else:
3836 a9e0c397 Iustin Pop
      raise errors.ProgrammerError("Unhandled disk replacement case")
3837 22985314 Guido Trotter
3838 22985314 Guido Trotter
    ret = fn(feedback_fn)
3839 22985314 Guido Trotter
3840 22985314 Guido Trotter
    # Deactivate the instance disks if we're replacing them on a down instance
3841 22985314 Guido Trotter
    if instance.status == "down":
3842 22985314 Guido Trotter
      op = opcodes.OpDeactivateInstanceDisks(instance_name=instance.name)
3843 22985314 Guido Trotter
      self.proc.ChainOpCode(op)
3844 22985314 Guido Trotter
3845 22985314 Guido Trotter
    return ret
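  # Illustrative usage sketch (not part of the original code; the opcode
  # class name opcodes.OpReplaceDisks and its exact fields are assumptions
  # based on the attributes read above). Replacing the secondary of an
  # instance could then be requested roughly as:
  #
  #   op = opcodes.OpReplaceDisks(instance_name="web1.example.com",
  #                               remote_node="node3.example.com",
  #                               disks=["sda", "sdb"])
  #
  # With remote_node set, Exec() dispatches to _ExecD8Secondary; without
  # it, _ExecD8DiskOnly replaces the disks on the current nodes.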
3846 a9e0c397 Iustin Pop
3847 a8083063 Iustin Pop
3848 8729e0d7 Iustin Pop
class LUGrowDisk(LogicalUnit):
3849 8729e0d7 Iustin Pop
  """Grow a disk of an instance.
3850 8729e0d7 Iustin Pop

3851 8729e0d7 Iustin Pop
  """
3852 8729e0d7 Iustin Pop
  HPATH = "disk-grow"
3853 8729e0d7 Iustin Pop
  HTYPE = constants.HTYPE_INSTANCE
3854 8729e0d7 Iustin Pop
  _OP_REQP = ["instance_name", "disk", "amount"]
3855 8729e0d7 Iustin Pop
3856 8729e0d7 Iustin Pop
  def BuildHooksEnv(self):
3857 8729e0d7 Iustin Pop
    """Build hooks env.
3858 8729e0d7 Iustin Pop

3859 8729e0d7 Iustin Pop
    This runs on the master, the primary and all the secondaries.
3860 8729e0d7 Iustin Pop

3861 8729e0d7 Iustin Pop
    """
3862 8729e0d7 Iustin Pop
    env = {
3863 8729e0d7 Iustin Pop
      "DISK": self.op.disk,
3864 8729e0d7 Iustin Pop
      "AMOUNT": self.op.amount,
3865 8729e0d7 Iustin Pop
      }
3866 8729e0d7 Iustin Pop
    env.update(_BuildInstanceHookEnvByObject(self.instance))
3867 8729e0d7 Iustin Pop
    nl = [
3868 8729e0d7 Iustin Pop
      self.sstore.GetMasterNode(),
3869 8729e0d7 Iustin Pop
      self.instance.primary_node,
3870 8729e0d7 Iustin Pop
      ]
3871 8729e0d7 Iustin Pop
    return env, nl, nl
3872 8729e0d7 Iustin Pop
3873 8729e0d7 Iustin Pop
  def CheckPrereq(self):
3874 8729e0d7 Iustin Pop
    """Check prerequisites.
3875 8729e0d7 Iustin Pop

3876 8729e0d7 Iustin Pop
    This checks that the instance is in the cluster.
3877 8729e0d7 Iustin Pop

3878 8729e0d7 Iustin Pop
    """
3879 8729e0d7 Iustin Pop
    instance = self.cfg.GetInstanceInfo(
3880 8729e0d7 Iustin Pop
      self.cfg.ExpandInstanceName(self.op.instance_name))
3881 8729e0d7 Iustin Pop
    if instance is None:
3882 8729e0d7 Iustin Pop
      raise errors.OpPrereqError("Instance '%s' not known" %
3883 8729e0d7 Iustin Pop
                                 self.op.instance_name)
3884 8729e0d7 Iustin Pop
    self.instance = instance
3885 8729e0d7 Iustin Pop
    self.op.instance_name = instance.name
3886 8729e0d7 Iustin Pop
3887 8729e0d7 Iustin Pop
    if instance.disk_template not in (constants.DT_PLAIN, constants.DT_DRBD8):
3888 8729e0d7 Iustin Pop
      raise errors.OpPrereqError("Instance's disk layout does not support"
3889 8729e0d7 Iustin Pop
                                 " growing.")
3890 8729e0d7 Iustin Pop
3891 8729e0d7 Iustin Pop
    if instance.FindDisk(self.op.disk) is None:
3892 8729e0d7 Iustin Pop
      raise errors.OpPrereqError("Disk '%s' not found for instance '%s'" %
3893 c7cdfc90 Iustin Pop
                                 (self.op.disk, instance.name))
3894 8729e0d7 Iustin Pop
3895 8729e0d7 Iustin Pop
    nodenames = [instance.primary_node] + list(instance.secondary_nodes)
3896 8729e0d7 Iustin Pop
    nodeinfo = rpc.call_node_info(nodenames, self.cfg.GetVGName())
3897 8729e0d7 Iustin Pop
    for node in nodenames:
3898 8729e0d7 Iustin Pop
      info = nodeinfo.get(node, None)
3899 8729e0d7 Iustin Pop
      if not info:
3900 8729e0d7 Iustin Pop
        raise errors.OpPrereqError("Cannot get current information"
3901 8729e0d7 Iustin Pop
                                   " from node '%s'" % node)
3902 8729e0d7 Iustin Pop
      vg_free = info.get('vg_free', None)
3903 8729e0d7 Iustin Pop
      if not isinstance(vg_free, int):
3904 8729e0d7 Iustin Pop
        raise errors.OpPrereqError("Can't compute free disk space on"
3905 8729e0d7 Iustin Pop
                                   " node %s" % node)
3906 8729e0d7 Iustin Pop
      if self.op.amount > info['vg_free']:
3907 8729e0d7 Iustin Pop
        raise errors.OpPrereqError("Not enough disk space on target node %s:"
3908 8729e0d7 Iustin Pop
                                   " %d MiB available, %d MiB required" %
3909 8729e0d7 Iustin Pop
                                   (node, info['vg_free'], self.op.amount))
3910 8729e0d7 Iustin Pop
3911 8729e0d7 Iustin Pop
  def Exec(self, feedback_fn):
3912 8729e0d7 Iustin Pop
    """Execute disk grow.
3913 8729e0d7 Iustin Pop

3914 8729e0d7 Iustin Pop
    """
3915 8729e0d7 Iustin Pop
    instance = self.instance
3916 8729e0d7 Iustin Pop
    disk = instance.FindDisk(self.op.disk)
3917 8729e0d7 Iustin Pop
    for node in (instance.secondary_nodes + (instance.primary_node,)):
3918 8729e0d7 Iustin Pop
      self.cfg.SetDiskID(disk, node)
3919 8729e0d7 Iustin Pop
      result = rpc.call_blockdev_grow(node, disk, self.op.amount)
3920 8729e0d7 Iustin Pop
      if not result or not isinstance(result, tuple) or len(result) != 2:
3921 8729e0d7 Iustin Pop
        raise errors.OpExecError("grow request failed to node %s" % node)
3922 8729e0d7 Iustin Pop
      elif not result[0]:
3923 8729e0d7 Iustin Pop
        raise errors.OpExecError("grow request failed to node %s: %s" %
3924 8729e0d7 Iustin Pop
                                 (node, result[1]))
3925 8729e0d7 Iustin Pop
    disk.RecordGrow(self.op.amount)
3926 8729e0d7 Iustin Pop
    self.cfg.Update(instance)
3927 8729e0d7 Iustin Pop
    return
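  # Illustrative usage sketch (not part of the original code; the opcode
  # class name opcodes.OpGrowDisk is an assumption, its fields follow
  # _OP_REQP above):
  #
  #   op = opcodes.OpGrowDisk(instance_name="web1.example.com",
  #                           disk="sda", amount=1024)
  #
  # The amount is in MiB; it is applied on the primary and all secondary
  # nodes before the new size is recorded in the configuration.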
3928 8729e0d7 Iustin Pop
3929 8729e0d7 Iustin Pop
3930 a8083063 Iustin Pop
class LUQueryInstanceData(NoHooksLU):
3931 a8083063 Iustin Pop
  """Query runtime instance data.
3932 a8083063 Iustin Pop

3933 a8083063 Iustin Pop
  """
3934 a8083063 Iustin Pop
  _OP_REQP = ["instances"]
3935 a8083063 Iustin Pop
3936 a8083063 Iustin Pop
  def CheckPrereq(self):
3937 a8083063 Iustin Pop
    """Check prerequisites.
3938 a8083063 Iustin Pop

3939 a8083063 Iustin Pop
    This only checks the optional instance list against the existing names.
3940 a8083063 Iustin Pop

3941 a8083063 Iustin Pop
    """
3942 a8083063 Iustin Pop
    if not isinstance(self.op.instances, list):
3943 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("Invalid argument type 'instances'")
3944 a8083063 Iustin Pop
    if self.op.instances:
3945 a8083063 Iustin Pop
      self.wanted_instances = []
3946 a8083063 Iustin Pop
      names = self.op.instances
3947 a8083063 Iustin Pop
      for name in names:
3948 a8083063 Iustin Pop
        instance = self.cfg.GetInstanceInfo(self.cfg.ExpandInstanceName(name))
3949 a8083063 Iustin Pop
        if instance is None:
3950 3ecf6786 Iustin Pop
          raise errors.OpPrereqError("No such instance name '%s'" % name)
3951 515207af Guido Trotter
        self.wanted_instances.append(instance)
3952 a8083063 Iustin Pop
    else:
3953 a8083063 Iustin Pop
      self.wanted_instances = [self.cfg.GetInstanceInfo(name) for name
3954 a8083063 Iustin Pop
                               in self.cfg.GetInstanceList()]
3955 a8083063 Iustin Pop
    return
3956 a8083063 Iustin Pop
3957 a8083063 Iustin Pop
3958 a8083063 Iustin Pop
  def _ComputeDiskStatus(self, instance, snode, dev):
3959 a8083063 Iustin Pop
    """Compute block device status.
3960 a8083063 Iustin Pop

3961 a8083063 Iustin Pop
    """
3962 a8083063 Iustin Pop
    self.cfg.SetDiskID(dev, instance.primary_node)
3963 a8083063 Iustin Pop
    dev_pstatus = rpc.call_blockdev_find(instance.primary_node, dev)
3964 a1f445d3 Iustin Pop
    if dev.dev_type in constants.LDS_DRBD:
3965 a8083063 Iustin Pop
      # we change the snode then (otherwise we use the one passed in)
3966 a8083063 Iustin Pop
      if dev.logical_id[0] == instance.primary_node:
3967 a8083063 Iustin Pop
        snode = dev.logical_id[1]
3968 a8083063 Iustin Pop
      else:
3969 a8083063 Iustin Pop
        snode = dev.logical_id[0]
3970 a8083063 Iustin Pop
3971 a8083063 Iustin Pop
    if snode:
3972 a8083063 Iustin Pop
      self.cfg.SetDiskID(dev, snode)
3973 a8083063 Iustin Pop
      dev_sstatus = rpc.call_blockdev_find(snode, dev)
3974 a8083063 Iustin Pop
    else:
3975 a8083063 Iustin Pop
      dev_sstatus = None
3976 a8083063 Iustin Pop
3977 a8083063 Iustin Pop
    if dev.children:
3978 a8083063 Iustin Pop
      dev_children = [self._ComputeDiskStatus(instance, snode, child)
3979 a8083063 Iustin Pop
                      for child in dev.children]
3980 a8083063 Iustin Pop
    else:
3981 a8083063 Iustin Pop
      dev_children = []
3982 a8083063 Iustin Pop
3983 a8083063 Iustin Pop
    data = {
3984 a8083063 Iustin Pop
      "iv_name": dev.iv_name,
3985 a8083063 Iustin Pop
      "dev_type": dev.dev_type,
3986 a8083063 Iustin Pop
      "logical_id": dev.logical_id,
3987 a8083063 Iustin Pop
      "physical_id": dev.physical_id,
3988 a8083063 Iustin Pop
      "pstatus": dev_pstatus,
3989 a8083063 Iustin Pop
      "sstatus": dev_sstatus,
3990 a8083063 Iustin Pop
      "children": dev_children,
3991 a8083063 Iustin Pop
      }
3992 a8083063 Iustin Pop
3993 a8083063 Iustin Pop
    return data
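    # Illustrative shape of the dict built above (values invented for the
    # example); a DRBD8 disk with two LV children recurses one level:
    #   {"iv_name": "sda", "dev_type": constants.LD_DRBD8,
    #    "logical_id": (...), "physical_id": (...),
    #    "pstatus": <call_blockdev_find result on the primary>,
    #    "sstatus": <call_blockdev_find result on the secondary, or None>,
    #    "children": [<the same structure for each LV child>]}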
3994 a8083063 Iustin Pop
3995 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
3996 a8083063 Iustin Pop
    """Gather and return data"""
3997 a8083063 Iustin Pop
    result = {}
3998 a8083063 Iustin Pop
    for instance in self.wanted_instances:
3999 a8083063 Iustin Pop
      remote_info = rpc.call_instance_info(instance.primary_node,
4000 a8083063 Iustin Pop
                                                instance.name)
4001 a8083063 Iustin Pop
      if remote_info and "state" in remote_info:
4002 a8083063 Iustin Pop
        remote_state = "up"
4003 a8083063 Iustin Pop
      else:
4004 a8083063 Iustin Pop
        remote_state = "down"
4005 a8083063 Iustin Pop
      if instance.status == "down":
4006 a8083063 Iustin Pop
        config_state = "down"
4007 a8083063 Iustin Pop
      else:
4008 a8083063 Iustin Pop
        config_state = "up"
4009 a8083063 Iustin Pop
4010 a8083063 Iustin Pop
      disks = [self._ComputeDiskStatus(instance, None, device)
4011 a8083063 Iustin Pop
               for device in instance.disks]
4012 a8083063 Iustin Pop
4013 a8083063 Iustin Pop
      idict = {
4014 a8083063 Iustin Pop
        "name": instance.name,
4015 a8083063 Iustin Pop
        "config_state": config_state,
4016 a8083063 Iustin Pop
        "run_state": remote_state,
4017 a8083063 Iustin Pop
        "pnode": instance.primary_node,
4018 a8083063 Iustin Pop
        "snodes": instance.secondary_nodes,
4019 a8083063 Iustin Pop
        "os": instance.os,
4020 a8083063 Iustin Pop
        "memory": instance.memory,
4021 a8083063 Iustin Pop
        "nics": [(nic.mac, nic.ip, nic.bridge) for nic in instance.nics],
4022 a8083063 Iustin Pop
        "disks": disks,
4023 f55ff7ec Iustin Pop
        "vcpus": instance.vcpus,
4024 a8083063 Iustin Pop
        }
4025 a8083063 Iustin Pop
4026 a8340917 Iustin Pop
      htkind = self.sstore.GetHypervisorType()
4027 a8340917 Iustin Pop
      if htkind == constants.HT_XEN_PVM30:
4028 a8340917 Iustin Pop
        idict["kernel_path"] = instance.kernel_path
4029 a8340917 Iustin Pop
        idict["initrd_path"] = instance.initrd_path
4030 a8340917 Iustin Pop
4031 a8340917 Iustin Pop
      if htkind == constants.HT_XEN_HVM31:
4032 a8340917 Iustin Pop
        idict["hvm_boot_order"] = instance.hvm_boot_order
4033 a8340917 Iustin Pop
        idict["hvm_acpi"] = instance.hvm_acpi
4034 a8340917 Iustin Pop
        idict["hvm_pae"] = instance.hvm_pae
4035 a8340917 Iustin Pop
        idict["hvm_cdrom_image_path"] = instance.hvm_cdrom_image_path
4036 a8340917 Iustin Pop
4037 a8340917 Iustin Pop
      if htkind in constants.HTS_REQ_PORT:
4038 a8340917 Iustin Pop
        idict["vnc_bind_address"] = instance.vnc_bind_address
4039 a8340917 Iustin Pop
        idict["network_port"] = instance.network_port
4040 a8340917 Iustin Pop
4041 a8083063 Iustin Pop
      result[instance.name] = idict
4042 a8083063 Iustin Pop
4043 a8083063 Iustin Pop
    return result
4044 a8083063 Iustin Pop
4045 a8083063 Iustin Pop
4046 7767bbf5 Manuel Franceschini
class LUSetInstanceParams(LogicalUnit):
4047 a8083063 Iustin Pop
  """Modifies an instances's parameters.
4048 a8083063 Iustin Pop

4049 a8083063 Iustin Pop
  """
4050 a8083063 Iustin Pop
  HPATH = "instance-modify"
4051 a8083063 Iustin Pop
  HTYPE = constants.HTYPE_INSTANCE
4052 a8083063 Iustin Pop
  _OP_REQP = ["instance_name"]
4053 a8083063 Iustin Pop
4054 a8083063 Iustin Pop
  def BuildHooksEnv(self):
4055 a8083063 Iustin Pop
    """Build hooks env.
4056 a8083063 Iustin Pop

4057 a8083063 Iustin Pop
    This runs on the master, primary and secondaries.
4058 a8083063 Iustin Pop

4059 a8083063 Iustin Pop
    """
4060 396e1b78 Michael Hanselmann
    args = dict()
4061 a8083063 Iustin Pop
    if self.mem:
4062 396e1b78 Michael Hanselmann
      args['memory'] = self.mem
4063 a8083063 Iustin Pop
    if self.vcpus:
4064 396e1b78 Michael Hanselmann
      args['vcpus'] = self.vcpus
4065 ef756965 Iustin Pop
    if self.do_ip or self.do_bridge or self.mac:
4066 396e1b78 Michael Hanselmann
      if self.do_ip:
4067 396e1b78 Michael Hanselmann
        ip = self.ip
4068 396e1b78 Michael Hanselmann
      else:
4069 396e1b78 Michael Hanselmann
        ip = self.instance.nics[0].ip
4070 396e1b78 Michael Hanselmann
      if self.bridge:
4071 396e1b78 Michael Hanselmann
        bridge = self.bridge
4072 396e1b78 Michael Hanselmann
      else:
4073 396e1b78 Michael Hanselmann
        bridge = self.instance.nics[0].bridge
4074 ef756965 Iustin Pop
      if self.mac:
4075 ef756965 Iustin Pop
        mac = self.mac
4076 ef756965 Iustin Pop
      else:
4077 ef756965 Iustin Pop
        mac = self.instance.nics[0].mac
4078 ef756965 Iustin Pop
      args['nics'] = [(ip, bridge, mac)]
4079 396e1b78 Michael Hanselmann
    env = _BuildInstanceHookEnvByObject(self.instance, override=args)
4080 880478f8 Iustin Pop
    nl = [self.sstore.GetMasterNode(),
4081 a8083063 Iustin Pop
          self.instance.primary_node] + list(self.instance.secondary_nodes)
4082 a8083063 Iustin Pop
    return env, nl, nl
4083 a8083063 Iustin Pop
4084 a8083063 Iustin Pop
  def CheckPrereq(self):
4085 a8083063 Iustin Pop
    """Check prerequisites.
4086 a8083063 Iustin Pop

4087 a8083063 Iustin Pop
    This only checks the instance list against the existing names.
4088 a8083063 Iustin Pop

4089 a8083063 Iustin Pop
    """
4090 a8083063 Iustin Pop
    self.mem = getattr(self.op, "mem", None)
4091 a8083063 Iustin Pop
    self.vcpus = getattr(self.op, "vcpus", None)
4092 a8083063 Iustin Pop
    self.ip = getattr(self.op, "ip", None)
4093 1862d460 Alexander Schreiber
    self.mac = getattr(self.op, "mac", None)
4094 a8083063 Iustin Pop
    self.bridge = getattr(self.op, "bridge", None)
4095 973d7867 Iustin Pop
    self.kernel_path = getattr(self.op, "kernel_path", None)
4096 973d7867 Iustin Pop
    self.initrd_path = getattr(self.op, "initrd_path", None)
4097 25c5878d Alexander Schreiber
    self.hvm_boot_order = getattr(self.op, "hvm_boot_order", None)
4098 31a853d2 Iustin Pop
    self.hvm_acpi = getattr(self.op, "hvm_acpi", None)
4099 31a853d2 Iustin Pop
    self.hvm_pae = getattr(self.op, "hvm_pae", None)
4100 31a853d2 Iustin Pop
    self.hvm_cdrom_image_path = getattr(self.op, "hvm_cdrom_image_path", None)
4101 31a853d2 Iustin Pop
    self.vnc_bind_address = getattr(self.op, "vnc_bind_address", None)
4102 31a853d2 Iustin Pop
    all_parms = [self.mem, self.vcpus, self.ip, self.bridge, self.mac,
4103 31a853d2 Iustin Pop
                 self.kernel_path, self.initrd_path, self.hvm_boot_order,
4104 31a853d2 Iustin Pop
                 self.hvm_acpi, self.hvm_pae, self.hvm_cdrom_image_path,
4105 31a853d2 Iustin Pop
                 self.vnc_bind_address]
4106 31a853d2 Iustin Pop
    if all_parms.count(None) == len(all_parms):
4107 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("No changes submitted")
4108 a8083063 Iustin Pop
    if self.mem is not None:
4109 a8083063 Iustin Pop
      try:
4110 a8083063 Iustin Pop
        self.mem = int(self.mem)
4111 a8083063 Iustin Pop
      except ValueError, err:
4112 3ecf6786 Iustin Pop
        raise errors.OpPrereqError("Invalid memory size: %s" % str(err))
4113 a8083063 Iustin Pop
    if self.vcpus is not None:
4114 a8083063 Iustin Pop
      try:
4115 a8083063 Iustin Pop
        self.vcpus = int(self.vcpus)
4116 a8083063 Iustin Pop
      except ValueError, err:
4117 3ecf6786 Iustin Pop
        raise errors.OpPrereqError("Invalid vcpus number: %s" % str(err))
4118 a8083063 Iustin Pop
    if self.ip is not None:
4119 a8083063 Iustin Pop
      self.do_ip = True
4120 a8083063 Iustin Pop
      if self.ip.lower() == "none":
4121 a8083063 Iustin Pop
        self.ip = None
4122 a8083063 Iustin Pop
      else:
4123 a8083063 Iustin Pop
        if not utils.IsValidIP(self.ip):
4124 3ecf6786 Iustin Pop
          raise errors.OpPrereqError("Invalid IP address '%s'." % self.ip)
4125 a8083063 Iustin Pop
    else:
4126 a8083063 Iustin Pop
      self.do_ip = False
4127 ecb215b5 Michael Hanselmann
    self.do_bridge = (self.bridge is not None)
4128 1862d460 Alexander Schreiber
    if self.mac is not None:
4129 1862d460 Alexander Schreiber
      if self.cfg.IsMacInUse(self.mac):
4130 1862d460 Alexander Schreiber
        raise errors.OpPrereqError("MAC address %s already in use in cluster" %
4131 1862d460 Alexander Schreiber
                                   self.mac)
4132 1862d460 Alexander Schreiber
      if not utils.IsValidMac(self.mac):
4133 1862d460 Alexander Schreiber
        raise errors.OpPrereqError("Invalid MAC address %s" % self.mac)
4134 a8083063 Iustin Pop
4135 973d7867 Iustin Pop
    if self.kernel_path is not None:
4136 973d7867 Iustin Pop
      self.do_kernel_path = True
4137 973d7867 Iustin Pop
      if self.kernel_path == constants.VALUE_NONE:
4138 973d7867 Iustin Pop
        raise errors.OpPrereqError("Can't set instance to no kernel")
4139 973d7867 Iustin Pop
4140 973d7867 Iustin Pop
      if self.kernel_path != constants.VALUE_DEFAULT:
4141 973d7867 Iustin Pop
        if not os.path.isabs(self.kernel_path):
4142 ba4b62cf Iustin Pop
          raise errors.OpPrereqError("The kernel path must be an absolute"
4143 973d7867 Iustin Pop
                                    " filename")
4144 8cafeb26 Iustin Pop
    else:
4145 8cafeb26 Iustin Pop
      self.do_kernel_path = False
4146 973d7867 Iustin Pop
4147 973d7867 Iustin Pop
    if self.initrd_path is not None:
4148 973d7867 Iustin Pop
      self.do_initrd_path = True
4149 973d7867 Iustin Pop
      if self.initrd_path not in (constants.VALUE_NONE,
4150 973d7867 Iustin Pop
                                  constants.VALUE_DEFAULT):
4151 2bc22872 Iustin Pop
        if not os.path.isabs(self.initrd_path):
4152 ba4b62cf Iustin Pop
          raise errors.OpPrereqError("The initrd path must be an absolute"
4153 973d7867 Iustin Pop
                                    " filename")
4154 8cafeb26 Iustin Pop
    else:
4155 8cafeb26 Iustin Pop
      self.do_initrd_path = False
4156 973d7867 Iustin Pop
4157 25c5878d Alexander Schreiber
    # boot order verification
4158 25c5878d Alexander Schreiber
    if self.hvm_boot_order is not None:
4159 25c5878d Alexander Schreiber
      if self.hvm_boot_order != constants.VALUE_DEFAULT:
4160 25c5878d Alexander Schreiber
        if len(self.hvm_boot_order.strip("acdn")) != 0:
4161 25c5878d Alexander Schreiber
          raise errors.OpPrereqError("invalid boot order specified,"
4162 25c5878d Alexander Schreiber
                                     " must be one or more of [acdn]"
4163 25c5878d Alexander Schreiber
                                     " or 'default'")
4164 25c5878d Alexander Schreiber
4165 31a853d2 Iustin Pop
    # hvm_cdrom_image_path verification
4166 31a853d2 Iustin Pop
    if self.op.hvm_cdrom_image_path is not None:
4167 31a853d2 Iustin Pop
      if not os.path.isabs(self.op.hvm_cdrom_image_path):
4168 31a853d2 Iustin Pop
        raise errors.OpPrereqError("The path to the HVM CDROM image must"
4169 31a853d2 Iustin Pop
                                   " be an absolute path or None, not %s" %
4170 31a853d2 Iustin Pop
                                   self.op.hvm_cdrom_image_path)
4171 31a853d2 Iustin Pop
      if not os.path.isfile(self.op.hvm_cdrom_image_path):
4172 31a853d2 Iustin Pop
        raise errors.OpPrereqError("The HVM CDROM image must either be a"
4173 31a853d2 Iustin Pop
                                   " regular file or a symlink pointing to"
4174 31a853d2 Iustin Pop
                                   " an existing regular file, not %s" %
4175 31a853d2 Iustin Pop
                                   self.op.hvm_cdrom_image_path)
4176 31a853d2 Iustin Pop
4177 31a853d2 Iustin Pop
    # vnc_bind_address verification
4178 31a853d2 Iustin Pop
    if self.op.vnc_bind_address is not None:
4179 31a853d2 Iustin Pop
      if not utils.IsValidIP(self.op.vnc_bind_address):
4180 31a853d2 Iustin Pop
        raise errors.OpPrereqError("given VNC bind address '%s' doesn't look"
4181 31a853d2 Iustin Pop
                                   " like a valid IP address" %
4182 31a853d2 Iustin Pop
                                   self.op.vnc_bind_address)
4183 31a853d2 Iustin Pop
4184 a8083063 Iustin Pop
    instance = self.cfg.GetInstanceInfo(
4185 a8083063 Iustin Pop
      self.cfg.ExpandInstanceName(self.op.instance_name))
4186 a8083063 Iustin Pop
    if instance is None:
4187 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("No such instance name '%s'" %
4188 3ecf6786 Iustin Pop
                                 self.op.instance_name)
4189 a8083063 Iustin Pop
    self.op.instance_name = instance.name
4190 a8083063 Iustin Pop
    self.instance = instance
4191 a8083063 Iustin Pop
    return
4192 a8083063 Iustin Pop
4193 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
4194 a8083063 Iustin Pop
    """Modifies an instance.
4195 a8083063 Iustin Pop

4196 a8083063 Iustin Pop
    All parameters take effect only at the next restart of the instance.
4197 a8083063 Iustin Pop
    """
4198 a8083063 Iustin Pop
    result = []
4199 a8083063 Iustin Pop
    instance = self.instance
4200 a8083063 Iustin Pop
    if self.mem:
4201 a8083063 Iustin Pop
      instance.memory = self.mem
4202 a8083063 Iustin Pop
      result.append(("mem", self.mem))
4203 a8083063 Iustin Pop
    if self.vcpus:
4204 a8083063 Iustin Pop
      instance.vcpus = self.vcpus
4205 a8083063 Iustin Pop
      result.append(("vcpus",  self.vcpus))
4206 a8083063 Iustin Pop
    if self.do_ip:
4207 a8083063 Iustin Pop
      instance.nics[0].ip = self.ip
4208 a8083063 Iustin Pop
      result.append(("ip", self.ip))
4209 a8083063 Iustin Pop
    if self.bridge:
4210 a8083063 Iustin Pop
      instance.nics[0].bridge = self.bridge
4211 a8083063 Iustin Pop
      result.append(("bridge", self.bridge))
4212 1862d460 Alexander Schreiber
    if self.mac:
4213 1862d460 Alexander Schreiber
      instance.nics[0].mac = self.mac
4214 1862d460 Alexander Schreiber
      result.append(("mac", self.mac))
4215 973d7867 Iustin Pop
    if self.do_kernel_path:
4216 973d7867 Iustin Pop
      instance.kernel_path = self.kernel_path
4217 973d7867 Iustin Pop
      result.append(("kernel_path", self.kernel_path))
4218 973d7867 Iustin Pop
    if self.do_initrd_path:
4219 973d7867 Iustin Pop
      instance.initrd_path = self.initrd_path
4220 973d7867 Iustin Pop
      result.append(("initrd_path", self.initrd_path))
4221 25c5878d Alexander Schreiber
    if self.hvm_boot_order:
4222 25c5878d Alexander Schreiber
      if self.hvm_boot_order == constants.VALUE_DEFAULT:
4223 25c5878d Alexander Schreiber
        instance.hvm_boot_order = None
4224 25c5878d Alexander Schreiber
      else:
4225 25c5878d Alexander Schreiber
        instance.hvm_boot_order = self.hvm_boot_order
4226 25c5878d Alexander Schreiber
      result.append(("hvm_boot_order", self.hvm_boot_order))
4227 31a853d2 Iustin Pop
    if self.hvm_acpi:
4228 ec1ba002 Iustin Pop
      instance.hvm_acpi = self.hvm_acpi
4229 31a853d2 Iustin Pop
      result.append(("hvm_acpi", self.hvm_acpi))
4230 31a853d2 Iustin Pop
    if self.hvm_pae:
4231 ec1ba002 Iustin Pop
      instance.hvm_pae = self.hvm_pae
4232 31a853d2 Iustin Pop
      result.append(("hvm_pae", self.hvm_pae))
4233 31a853d2 Iustin Pop
    if self.hvm_cdrom_image_path:
4234 ec1ba002 Iustin Pop
      instance.hvm_cdrom_image_path = self.hvm_cdrom_image_path
4235 31a853d2 Iustin Pop
      result.append(("hvm_cdrom_image_path", self.hvm_cdrom_image_path))
4236 31a853d2 Iustin Pop
    if self.vnc_bind_address:
4237 31a853d2 Iustin Pop
      instance.vnc_bind_address = self.vnc_bind_address
4238 31a853d2 Iustin Pop
      result.append(("vnc_bind_address", self.vnc_bind_address))
4239 a8083063 Iustin Pop
4240 a8083063 Iustin Pop
    self.cfg.AddInstance(instance)
4241 a8083063 Iustin Pop
4242 a8083063 Iustin Pop
    return result
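    # The list returned above holds one (parameter, new_value) pair per
    # modified field, e.g. [("mem", 512), ("bridge", "xen-br1")] (values
    # purely illustrative).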
4243 a8083063 Iustin Pop
4244 a8083063 Iustin Pop
4245 a8083063 Iustin Pop
class LUQueryExports(NoHooksLU):
4246 a8083063 Iustin Pop
  """Query the exports list
4247 a8083063 Iustin Pop

4248 a8083063 Iustin Pop
  """
4249 a8083063 Iustin Pop
  _OP_REQP = []
4250 a8083063 Iustin Pop
4251 a8083063 Iustin Pop
  def CheckPrereq(self):
4252 a8083063 Iustin Pop
    """Check that the nodelist contains only existing nodes.
4253 a8083063 Iustin Pop

4254 a8083063 Iustin Pop
    """
4255 dcb93971 Michael Hanselmann
    self.nodes = _GetWantedNodes(self, getattr(self.op, "nodes", None))
4256 a8083063 Iustin Pop
4257 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
4258 a8083063 Iustin Pop
    """Compute the list of all the exported system images.
4259 a8083063 Iustin Pop

4260 a8083063 Iustin Pop
    Returns:
4261 a8083063 Iustin Pop
      a dictionary with the structure node->(export-list)
4262 a8083063 Iustin Pop
      where export-list is a list of the instances exported on
4263 a8083063 Iustin Pop
      that node.
4264 a8083063 Iustin Pop

4265 a8083063 Iustin Pop
    """
4266 a7ba5e53 Iustin Pop
    return rpc.call_export_list(self.nodes)
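    # Example of the structure documented above (node and instance names
    # invented):
    #   {"node1.example.com": ["web1.example.com", "db1.example.com"],
    #    "node2.example.com": []}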
4267 a8083063 Iustin Pop
4268 a8083063 Iustin Pop
4269 a8083063 Iustin Pop
class LUExportInstance(LogicalUnit):
4270 a8083063 Iustin Pop
  """Export an instance to an image in the cluster.
4271 a8083063 Iustin Pop

4272 a8083063 Iustin Pop
  """
4273 a8083063 Iustin Pop
  HPATH = "instance-export"
4274 a8083063 Iustin Pop
  HTYPE = constants.HTYPE_INSTANCE
4275 a8083063 Iustin Pop
  _OP_REQP = ["instance_name", "target_node", "shutdown"]
4276 a8083063 Iustin Pop
4277 a8083063 Iustin Pop
  def BuildHooksEnv(self):
4278 a8083063 Iustin Pop
    """Build hooks env.
4279 a8083063 Iustin Pop

4280 a8083063 Iustin Pop
    This will run on the master, primary node and target node.
4281 a8083063 Iustin Pop

4282 a8083063 Iustin Pop
    """
4283 a8083063 Iustin Pop
    env = {
4284 a8083063 Iustin Pop
      "EXPORT_NODE": self.op.target_node,
4285 a8083063 Iustin Pop
      "EXPORT_DO_SHUTDOWN": self.op.shutdown,
4286 a8083063 Iustin Pop
      }
4287 396e1b78 Michael Hanselmann
    env.update(_BuildInstanceHookEnvByObject(self.instance))
4288 880478f8 Iustin Pop
    nl = [self.sstore.GetMasterNode(), self.instance.primary_node,
4289 a8083063 Iustin Pop
          self.op.target_node]
4290 a8083063 Iustin Pop
    return env, nl, nl
4291 a8083063 Iustin Pop
4292 a8083063 Iustin Pop
  def CheckPrereq(self):
4293 a8083063 Iustin Pop
    """Check prerequisites.
4294 a8083063 Iustin Pop

4295 9ac99fda Guido Trotter
    This checks that the instance and node names are valid.
4296 a8083063 Iustin Pop

4297 a8083063 Iustin Pop
    """
4298 a8083063 Iustin Pop
    instance_name = self.cfg.ExpandInstanceName(self.op.instance_name)
4299 a8083063 Iustin Pop
    self.instance = self.cfg.GetInstanceInfo(instance_name)
4300 a8083063 Iustin Pop
    if self.instance is None:
4301 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("Instance '%s' not found" %
4302 3ecf6786 Iustin Pop
                                 self.op.instance_name)
4303 a8083063 Iustin Pop
4304 a8083063 Iustin Pop
    # node verification
4305 a8083063 Iustin Pop
    dst_node_short = self.cfg.ExpandNodeName(self.op.target_node)
4306 a8083063 Iustin Pop
    self.dst_node = self.cfg.GetNodeInfo(dst_node_short)
4307 a8083063 Iustin Pop
4308 a8083063 Iustin Pop
    if self.dst_node is None:
4309 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("Destination node '%s' is unknown." %
4310 3ecf6786 Iustin Pop
                                 self.op.target_node)
4311 a8083063 Iustin Pop
    self.op.target_node = self.dst_node.name
4312 a8083063 Iustin Pop
4313 b6023d6c Manuel Franceschini
    # instance disk type verification
4314 b6023d6c Manuel Franceschini
    for disk in self.instance.disks:
4315 b6023d6c Manuel Franceschini
      if disk.dev_type == constants.LD_FILE:
4316 b6023d6c Manuel Franceschini
        raise errors.OpPrereqError("Export not supported for instances with"
4317 b6023d6c Manuel Franceschini
                                   " file-based disks")
4318 b6023d6c Manuel Franceschini
4319 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
4320 a8083063 Iustin Pop
    """Export an instance to an image in the cluster.
4321 a8083063 Iustin Pop

4322 a8083063 Iustin Pop
    """
4323 a8083063 Iustin Pop
    instance = self.instance
4324 a8083063 Iustin Pop
    dst_node = self.dst_node
4325 a8083063 Iustin Pop
    src_node = instance.primary_node
4326 a8083063 Iustin Pop
    if self.op.shutdown:
4327 fb300fb7 Guido Trotter
      # shutdown the instance, but not the disks
4328 fb300fb7 Guido Trotter
      if not rpc.call_instance_shutdown(src_node, instance):
4329 fb300fb7 Guido Trotter
         raise errors.OpExecError("Could not shutdown instance %s on node %s" %
4330 b4de68a9 Iustin Pop
                                  (instance.name, src_node))
4331 a8083063 Iustin Pop
4332 a8083063 Iustin Pop
    vgname = self.cfg.GetVGName()
4333 a8083063 Iustin Pop
4334 a8083063 Iustin Pop
    snap_disks = []
4335 a8083063 Iustin Pop
4336 a8083063 Iustin Pop
    try:
4337 a8083063 Iustin Pop
      for disk in instance.disks:
4338 a8083063 Iustin Pop
        if disk.iv_name == "sda":
4339 a8083063 Iustin Pop
          # new_dev_name will be a snapshot of an lvm leaf of the one we passed
4340 a8083063 Iustin Pop
          new_dev_name = rpc.call_blockdev_snapshot(src_node, disk)
4341 a8083063 Iustin Pop
4342 a8083063 Iustin Pop
          if not new_dev_name:
4343 a8083063 Iustin Pop
            logger.Error("could not snapshot block device %s on node %s" %
4344 a8083063 Iustin Pop
                         (disk.logical_id[1], src_node))
4345 a8083063 Iustin Pop
          else:
4346 fe96220b Iustin Pop
            new_dev = objects.Disk(dev_type=constants.LD_LV, size=disk.size,
4347 a8083063 Iustin Pop
                                      logical_id=(vgname, new_dev_name),
4348 a8083063 Iustin Pop
                                      physical_id=(vgname, new_dev_name),
4349 a8083063 Iustin Pop
                                      iv_name=disk.iv_name)
4350 a8083063 Iustin Pop
            snap_disks.append(new_dev)
4351 a8083063 Iustin Pop
4352 a8083063 Iustin Pop
    finally:
4353 fb300fb7 Guido Trotter
      if self.op.shutdown and instance.status == "up":
4354 fb300fb7 Guido Trotter
        if not rpc.call_instance_start(src_node, instance, None):
4355 fb300fb7 Guido Trotter
          _ShutdownInstanceDisks(instance, self.cfg)
4356 fb300fb7 Guido Trotter
          raise errors.OpExecError("Could not start instance")
4357 a8083063 Iustin Pop
4358 a8083063 Iustin Pop
    # TODO: check for size
4359 a8083063 Iustin Pop
4360 a8083063 Iustin Pop
    for dev in snap_disks:
4361 16687b98 Manuel Franceschini
      if not rpc.call_snapshot_export(src_node, dev, dst_node.name, instance):
4362 16687b98 Manuel Franceschini
        logger.Error("could not export block device %s from node %s to node %s"
4363 16687b98 Manuel Franceschini
                     % (dev.logical_id[1], src_node, dst_node.name))
4364 a8083063 Iustin Pop
      if not rpc.call_blockdev_remove(src_node, dev):
4365 16687b98 Manuel Franceschini
        logger.Error("could not remove snapshot block device %s from node %s" %
4366 16687b98 Manuel Franceschini
                     (dev.logical_id[1], src_node))
4367 a8083063 Iustin Pop
4368 a8083063 Iustin Pop
    if not rpc.call_finalize_export(dst_node.name, instance, snap_disks):
4369 a8083063 Iustin Pop
      logger.Error("could not finalize export for instance %s on node %s" %
4370 a8083063 Iustin Pop
                   (instance.name, dst_node.name))
4371 a8083063 Iustin Pop
4372 a8083063 Iustin Pop
    nodelist = self.cfg.GetNodeList()
4373 a8083063 Iustin Pop
    nodelist.remove(dst_node.name)
4374 a8083063 Iustin Pop
4375 a8083063 Iustin Pop
    # on one-node clusters nodelist will be empty after the removal;
4377 a8083063 Iustin Pop
    # if we proceed, the backup would be removed because OpQueryExports
4377 a8083063 Iustin Pop
    # substitutes an empty list with the full cluster node list.
4378 a8083063 Iustin Pop
    if nodelist:
4379 a8083063 Iustin Pop
      op = opcodes.OpQueryExports(nodes=nodelist)
4380 5bfac263 Iustin Pop
      exportlist = self.proc.ChainOpCode(op)
4381 a8083063 Iustin Pop
      for node in exportlist:
4382 a8083063 Iustin Pop
        if instance.name in exportlist[node]:
4383 a8083063 Iustin Pop
          if not rpc.call_export_remove(node, instance.name):
4384 a8083063 Iustin Pop
            logger.Error("could not remove older export for instance %s"
4385 a8083063 Iustin Pop
                         " on node %s" % (instance.name, node))
4386 5c947f38 Iustin Pop
4387 5c947f38 Iustin Pop
4388 9ac99fda Guido Trotter
class LURemoveExport(NoHooksLU):
4389 9ac99fda Guido Trotter
  """Remove exports related to the named instance.
4390 9ac99fda Guido Trotter

4391 9ac99fda Guido Trotter
  """
4392 9ac99fda Guido Trotter
  _OP_REQP = ["instance_name"]
4393 9ac99fda Guido Trotter
4394 9ac99fda Guido Trotter
  def CheckPrereq(self):
4395 9ac99fda Guido Trotter
    """Check prerequisites.
4396 9ac99fda Guido Trotter
    """
4397 9ac99fda Guido Trotter
    pass
4398 9ac99fda Guido Trotter
4399 9ac99fda Guido Trotter
  def Exec(self, feedback_fn):
4400 9ac99fda Guido Trotter
    """Remove any export.
4401 9ac99fda Guido Trotter

4402 9ac99fda Guido Trotter
    """
4403 9ac99fda Guido Trotter
    instance_name = self.cfg.ExpandInstanceName(self.op.instance_name)
4404 9ac99fda Guido Trotter
    # If the instance was not found we'll try with the name that was passed in.
4405 9ac99fda Guido Trotter
    # This will only work if it was an FQDN, though.
4406 9ac99fda Guido Trotter
    fqdn_warn = False
4407 9ac99fda Guido Trotter
    if not instance_name:
4408 9ac99fda Guido Trotter
      fqdn_warn = True
4409 9ac99fda Guido Trotter
      instance_name = self.op.instance_name
4410 9ac99fda Guido Trotter
4411 9ac99fda Guido Trotter
    op = opcodes.OpQueryExports(nodes=[])
4412 9ac99fda Guido Trotter
    exportlist = self.proc.ChainOpCode(op)
4413 9ac99fda Guido Trotter
    found = False
4414 9ac99fda Guido Trotter
    for node in exportlist:
4415 9ac99fda Guido Trotter
      if instance_name in exportlist[node]:
4416 9ac99fda Guido Trotter
        found = True
4417 9ac99fda Guido Trotter
        if not rpc.call_export_remove(node, instance_name):
4418 9ac99fda Guido Trotter
          logger.Error("could not remove export for instance %s"
4419 9ac99fda Guido Trotter
                       " on node %s" % (instance_name, node))
4420 9ac99fda Guido Trotter
4421 9ac99fda Guido Trotter
    if fqdn_warn and not found:
4422 9ac99fda Guido Trotter
      feedback_fn("Export not found. If trying to remove an export belonging"
4423 9ac99fda Guido Trotter
                  " to a deleted instance please use its Fully Qualified"
4424 9ac99fda Guido Trotter
                  " Domain Name.")
4425 9ac99fda Guido Trotter
4426 9ac99fda Guido Trotter
4427 5c947f38 Iustin Pop
class TagsLU(NoHooksLU):
4428 5c947f38 Iustin Pop
  """Generic tags LU.
4429 5c947f38 Iustin Pop

4430 5c947f38 Iustin Pop
  This is an abstract class which is the parent of all the other tags LUs.
4431 5c947f38 Iustin Pop

4432 5c947f38 Iustin Pop
  """
4433 5c947f38 Iustin Pop
  def CheckPrereq(self):
4434 5c947f38 Iustin Pop
    """Check prerequisites.
4435 5c947f38 Iustin Pop

4436 5c947f38 Iustin Pop
    """
4437 5c947f38 Iustin Pop
    if self.op.kind == constants.TAG_CLUSTER:
4438 5c947f38 Iustin Pop
      self.target = self.cfg.GetClusterInfo()
4439 5c947f38 Iustin Pop
    elif self.op.kind == constants.TAG_NODE:
4440 5c947f38 Iustin Pop
      name = self.cfg.ExpandNodeName(self.op.name)
4441 5c947f38 Iustin Pop
      if name is None:
4442 3ecf6786 Iustin Pop
        raise errors.OpPrereqError("Invalid node name (%s)" %
4443 3ecf6786 Iustin Pop
                                   (self.op.name,))
4444 5c947f38 Iustin Pop
      self.op.name = name
4445 5c947f38 Iustin Pop
      self.target = self.cfg.GetNodeInfo(name)
4446 5c947f38 Iustin Pop
    elif self.op.kind == constants.TAG_INSTANCE:
4447 8f684e16 Iustin Pop
      name = self.cfg.ExpandInstanceName(self.op.name)
4448 5c947f38 Iustin Pop
      if name is None:
4449 3ecf6786 Iustin Pop
        raise errors.OpPrereqError("Invalid instance name (%s)" %
4450 3ecf6786 Iustin Pop
                                   (self.op.name,))
4451 5c947f38 Iustin Pop
      self.op.name = name
4452 5c947f38 Iustin Pop
      self.target = self.cfg.GetInstanceInfo(name)
4453 5c947f38 Iustin Pop
    else:
4454 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("Wrong tag type requested (%s)" %
4455 3ecf6786 Iustin Pop
                                 str(self.op.kind))
4456 5c947f38 Iustin Pop
4457 5c947f38 Iustin Pop
4458 5c947f38 Iustin Pop
class LUGetTags(TagsLU):
4459 5c947f38 Iustin Pop
  """Returns the tags of a given object.
4460 5c947f38 Iustin Pop

4461 5c947f38 Iustin Pop
  """
4462 5c947f38 Iustin Pop
  _OP_REQP = ["kind", "name"]
4463 5c947f38 Iustin Pop
4464 5c947f38 Iustin Pop
  def Exec(self, feedback_fn):
4465 5c947f38 Iustin Pop
    """Returns the tag list.
4466 5c947f38 Iustin Pop

4467 5c947f38 Iustin Pop
    """
4468 5c947f38 Iustin Pop
    return self.target.GetTags()
4469 5c947f38 Iustin Pop
4470 5c947f38 Iustin Pop
4471 73415719 Iustin Pop
class LUSearchTags(NoHooksLU):
4472 73415719 Iustin Pop
  """Searches the tags for a given pattern.
4473 73415719 Iustin Pop

4474 73415719 Iustin Pop
  """
4475 73415719 Iustin Pop
  _OP_REQP = ["pattern"]
4476 73415719 Iustin Pop
4477 73415719 Iustin Pop
  def CheckPrereq(self):
4478 73415719 Iustin Pop
    """Check prerequisites.
4479 73415719 Iustin Pop

4480 73415719 Iustin Pop
    This checks the pattern passed for validity by compiling it.
4481 73415719 Iustin Pop

4482 73415719 Iustin Pop
    """
4483 73415719 Iustin Pop
    try:
4484 73415719 Iustin Pop
      self.re = re.compile(self.op.pattern)
4485 73415719 Iustin Pop
    except re.error, err:
4486 73415719 Iustin Pop
      raise errors.OpPrereqError("Invalid search pattern '%s': %s" %
4487 73415719 Iustin Pop
                                 (self.op.pattern, err))
4488 73415719 Iustin Pop
4489 73415719 Iustin Pop
  def Exec(self, feedback_fn):
4490 73415719 Iustin Pop
    """Returns the tag list.
4491 73415719 Iustin Pop

4492 73415719 Iustin Pop
    """
4493 73415719 Iustin Pop
    cfg = self.cfg
4494 73415719 Iustin Pop
    tgts = [("/cluster", cfg.GetClusterInfo())]
4495 73415719 Iustin Pop
    ilist = [cfg.GetInstanceInfo(name) for name in cfg.GetInstanceList()]
4496 73415719 Iustin Pop
    tgts.extend([("/instances/%s" % i.name, i) for i in ilist])
4497 73415719 Iustin Pop
    nlist = [cfg.GetNodeInfo(name) for name in cfg.GetNodeList()]
4498 73415719 Iustin Pop
    tgts.extend([("/nodes/%s" % n.name, n) for n in nlist])
4499 73415719 Iustin Pop
    results = []
4500 73415719 Iustin Pop
    for path, target in tgts:
4501 73415719 Iustin Pop
      for tag in target.GetTags():
4502 73415719 Iustin Pop
        if self.re.search(tag):
4503 73415719 Iustin Pop
          results.append((path, tag))
4504 73415719 Iustin Pop
    return results
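    # Example result for a pattern such as "^web" (paths and tags
    # invented): [("/cluster", "webfarm"),
    #             ("/instances/web1.example.com", "webserver")]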
4505 73415719 Iustin Pop
4506 73415719 Iustin Pop
4507 f27302fa Iustin Pop
class LUAddTags(TagsLU):
4508 5c947f38 Iustin Pop
  """Sets a tag on a given object.
4509 5c947f38 Iustin Pop

4510 5c947f38 Iustin Pop
  """
4511 f27302fa Iustin Pop
  _OP_REQP = ["kind", "name", "tags"]
4512 5c947f38 Iustin Pop
4513 5c947f38 Iustin Pop
  def CheckPrereq(self):
4514 5c947f38 Iustin Pop
    """Check prerequisites.
4515 5c947f38 Iustin Pop

4516 5c947f38 Iustin Pop
    This checks the type and length of each tag passed.
4517 5c947f38 Iustin Pop

4518 5c947f38 Iustin Pop
    """
4519 5c947f38 Iustin Pop
    TagsLU.CheckPrereq(self)
4520 f27302fa Iustin Pop
    for tag in self.op.tags:
4521 f27302fa Iustin Pop
      objects.TaggableObject.ValidateTag(tag)
4522 5c947f38 Iustin Pop
4523 5c947f38 Iustin Pop
  def Exec(self, feedback_fn):
4524 5c947f38 Iustin Pop
    """Sets the tag.
4525 5c947f38 Iustin Pop

4526 5c947f38 Iustin Pop
    """
4527 5c947f38 Iustin Pop
    try:
4528 f27302fa Iustin Pop
      for tag in self.op.tags:
4529 f27302fa Iustin Pop
        self.target.AddTag(tag)
4530 5c947f38 Iustin Pop
    except errors.TagError, err:
4531 3ecf6786 Iustin Pop
      raise errors.OpExecError("Error while setting tag: %s" % str(err))
4532 5c947f38 Iustin Pop
    try:
4533 5c947f38 Iustin Pop
      self.cfg.Update(self.target)
4534 5c947f38 Iustin Pop
    except errors.ConfigurationError:
4535 3ecf6786 Iustin Pop
      raise errors.OpRetryError("There has been a modification to the"
4536 3ecf6786 Iustin Pop
                                " config file and the operation has been"
4537 3ecf6786 Iustin Pop
                                " aborted. Please retry.")
4538 5c947f38 Iustin Pop
4539 5c947f38 Iustin Pop
4540 f27302fa Iustin Pop
class LUDelTags(TagsLU):
4541 f27302fa Iustin Pop
  """Delete a list of tags from a given object.
4542 5c947f38 Iustin Pop

4543 5c947f38 Iustin Pop
  """
4544 f27302fa Iustin Pop
  _OP_REQP = ["kind", "name", "tags"]
4545 5c947f38 Iustin Pop
4546 5c947f38 Iustin Pop
  def CheckPrereq(self):
4547 5c947f38 Iustin Pop
    """Check prerequisites.
4548 5c947f38 Iustin Pop

4549 5c947f38 Iustin Pop
    This checks that we have the given tag.
4550 5c947f38 Iustin Pop

4551 5c947f38 Iustin Pop
    """
4552 5c947f38 Iustin Pop
    TagsLU.CheckPrereq(self)
4553 f27302fa Iustin Pop
    for tag in self.op.tags:
4554 f27302fa Iustin Pop
      objects.TaggableObject.ValidateTag(tag)
4555 f27302fa Iustin Pop
    del_tags = frozenset(self.op.tags)
4556 f27302fa Iustin Pop
    cur_tags = self.target.GetTags()
4557 f27302fa Iustin Pop
    if not del_tags <= cur_tags:
4558 f27302fa Iustin Pop
      diff_tags = del_tags - cur_tags
4559 f27302fa Iustin Pop
      diff_names = ["'%s'" % tag for tag in diff_tags]
4560 f27302fa Iustin Pop
      diff_names.sort()
4561 f27302fa Iustin Pop
      raise errors.OpPrereqError("Tag(s) %s not found" %
4562 f27302fa Iustin Pop
                                 (",".join(diff_names)))
4563 5c947f38 Iustin Pop
4564 5c947f38 Iustin Pop
  def Exec(self, feedback_fn):
4565 5c947f38 Iustin Pop
    """Remove the tag from the object.
4566 5c947f38 Iustin Pop

4567 5c947f38 Iustin Pop
    """
4568 f27302fa Iustin Pop
    for tag in self.op.tags:
4569 f27302fa Iustin Pop
      self.target.RemoveTag(tag)
4570 5c947f38 Iustin Pop
    try:
4571 5c947f38 Iustin Pop
      self.cfg.Update(self.target)
4572 5c947f38 Iustin Pop
    except errors.ConfigurationError:
4573 3ecf6786 Iustin Pop
      raise errors.OpRetryError("There has been a modification to the"
4574 3ecf6786 Iustin Pop
                                " config file and the operation has been"
4575 3ecf6786 Iustin Pop
                                " aborted. Please retry.")
4576 06009e27 Iustin Pop
4577 06009e27 Iustin Pop
class LUTestDelay(NoHooksLU):
4578 06009e27 Iustin Pop
  """Sleep for a specified amount of time.
4579 06009e27 Iustin Pop

4580 06009e27 Iustin Pop
  This LU sleeps on the master and/or nodes for a specified amoutn of
4581 06009e27 Iustin Pop
  time.
4582 06009e27 Iustin Pop

4583 06009e27 Iustin Pop
  """
4584 06009e27 Iustin Pop
  _OP_REQP = ["duration", "on_master", "on_nodes"]
4585 06009e27 Iustin Pop
4586 06009e27 Iustin Pop
  def CheckPrereq(self):
4587 06009e27 Iustin Pop
    """Check prerequisites.
4588 06009e27 Iustin Pop

4589 06009e27 Iustin Pop
    This checks that we have a good list of nodes and/or the duration
4590 06009e27 Iustin Pop
    is valid.
4591 06009e27 Iustin Pop

4592 06009e27 Iustin Pop
    """
4593 06009e27 Iustin Pop
4594 06009e27 Iustin Pop
    if self.op.on_nodes:
4595 06009e27 Iustin Pop
      self.op.on_nodes = _GetWantedNodes(self, self.op.on_nodes)
4596 06009e27 Iustin Pop
4597 06009e27 Iustin Pop
  def Exec(self, feedback_fn):
4598 06009e27 Iustin Pop
    """Do the actual sleep.
4599 06009e27 Iustin Pop

4600 06009e27 Iustin Pop
    """
4601 06009e27 Iustin Pop
    if self.op.on_master:
4602 06009e27 Iustin Pop
      if not utils.TestDelay(self.op.duration):
4603 06009e27 Iustin Pop
        raise errors.OpExecError("Error during master delay test")
4604 06009e27 Iustin Pop
    if self.op.on_nodes:
4605 06009e27 Iustin Pop
      result = rpc.call_test_delay(self.op.on_nodes, self.op.duration)
4606 06009e27 Iustin Pop
      if not result:
4607 06009e27 Iustin Pop
        raise errors.OpExecError("Complete failure from rpc call")
4608 06009e27 Iustin Pop
      for node, node_result in result.items():
4609 06009e27 Iustin Pop
        if not node_result:
4610 06009e27 Iustin Pop
          raise errors.OpExecError("Failure during rpc call to node %s,"
4611 06009e27 Iustin Pop
                                   " result: %s" % (node, node_result))
4612 d61df03e Iustin Pop
4613 d61df03e Iustin Pop
4614 d1c2dd75 Iustin Pop
class IAllocator(object):
4615 d1c2dd75 Iustin Pop
  """IAllocator framework.
4616 d61df03e Iustin Pop

4617 d1c2dd75 Iustin Pop
  An IAllocator instance has four sets of attributes:
4618 d1c2dd75 Iustin Pop
    - cfg/sstore that are needed to query the cluster
4619 d1c2dd75 Iustin Pop
    - input data (all members of the mode-specific _ALLO_KEYS or
      _RELO_KEYS class attribute are required)
4620 d1c2dd75 Iustin Pop
    - four buffer attributes (in|out_data|text), that represent the
4621 d1c2dd75 Iustin Pop
      input (to the external script) in text and data structure format,
4622 d1c2dd75 Iustin Pop
      and the output from it, again in two formats
4623 d1c2dd75 Iustin Pop
    - the result variables from the script (success, info, nodes) for
4624 d1c2dd75 Iustin Pop
      easy usage
4625 d61df03e Iustin Pop

4626 d61df03e Iustin Pop
  """
4627 29859cb7 Iustin Pop
  _ALLO_KEYS = [
4628 d1c2dd75 Iustin Pop
    "mem_size", "disks", "disk_template",
4629 d1c2dd75 Iustin Pop
    "os", "tags", "nics", "vcpus",
4630 d1c2dd75 Iustin Pop
    ]
4631 29859cb7 Iustin Pop
  _RELO_KEYS = [
4632 29859cb7 Iustin Pop
    "relocate_from",
4633 29859cb7 Iustin Pop
    ]
4634 d1c2dd75 Iustin Pop
4635 29859cb7 Iustin Pop
  def __init__(self, cfg, sstore, mode, name, **kwargs):
4636 d1c2dd75 Iustin Pop
    self.cfg = cfg
4637 d1c2dd75 Iustin Pop
    self.sstore = sstore
4638 d1c2dd75 Iustin Pop
    # init buffer variables
4639 d1c2dd75 Iustin Pop
    self.in_text = self.out_text = self.in_data = self.out_data = None
4640 d1c2dd75 Iustin Pop
    # init all input fields so that pylint is happy
4641 29859cb7 Iustin Pop
    self.mode = mode
4642 29859cb7 Iustin Pop
    self.name = name
4643 d1c2dd75 Iustin Pop
    self.mem_size = self.disks = self.disk_template = None
4644 d1c2dd75 Iustin Pop
    self.os = self.tags = self.nics = self.vcpus = None
4645 29859cb7 Iustin Pop
    self.relocate_from = None
4646 27579978 Iustin Pop
    # computed fields
4647 27579978 Iustin Pop
    self.required_nodes = None
4648 d1c2dd75 Iustin Pop
    # init result fields
4649 d1c2dd75 Iustin Pop
    self.success = self.info = self.nodes = None
4650 29859cb7 Iustin Pop
    if self.mode == constants.IALLOCATOR_MODE_ALLOC:
4651 29859cb7 Iustin Pop
      keyset = self._ALLO_KEYS
4652 29859cb7 Iustin Pop
    elif self.mode == constants.IALLOCATOR_MODE_RELOC:
4653 29859cb7 Iustin Pop
      keyset = self._RELO_KEYS
4654 29859cb7 Iustin Pop
    else:
4655 29859cb7 Iustin Pop
      raise errors.ProgrammerError("Unknown mode '%s' passed to the"
4656 29859cb7 Iustin Pop
                                   " IAllocator" % self.mode)
4657 d1c2dd75 Iustin Pop
    for key in kwargs:
4658 29859cb7 Iustin Pop
      if key not in keyset:
4659 d1c2dd75 Iustin Pop
        raise errors.ProgrammerError("Invalid input parameter '%s' to"
4660 d1c2dd75 Iustin Pop
                                     " IAllocator" % key)
4661 d1c2dd75 Iustin Pop
      setattr(self, key, kwargs[key])
4662 29859cb7 Iustin Pop
    for key in keyset:
4663 d1c2dd75 Iustin Pop
      if key not in kwargs:
4664 d1c2dd75 Iustin Pop
        raise errors.ProgrammerError("Missing input parameter '%s' to"
4665 d1c2dd75 Iustin Pop
                                     " IAllocator" % key)
4666 d1c2dd75 Iustin Pop
    self._BuildInputData()
4667 d1c2dd75 Iustin Pop
4668 d1c2dd75 Iustin Pop
  def _ComputeClusterData(self):
4669 d1c2dd75 Iustin Pop
    """Compute the generic allocator input data.
4670 d1c2dd75 Iustin Pop

4671 d1c2dd75 Iustin Pop
    This is the data that is independent of the actual operation.
4672 d1c2dd75 Iustin Pop

4673 d1c2dd75 Iustin Pop
    """
4674 d1c2dd75 Iustin Pop
    cfg = self.cfg
4675 d1c2dd75 Iustin Pop
    # cluster data
4676 d1c2dd75 Iustin Pop
    data = {
4677 d1c2dd75 Iustin Pop
      "version": 1,
4678 d1c2dd75 Iustin Pop
      "cluster_name": self.sstore.GetClusterName(),
4679 d1c2dd75 Iustin Pop
      "cluster_tags": list(cfg.GetClusterInfo().GetTags()),
4680 6286519f Iustin Pop
      "hypervisor_type": self.sstore.GetHypervisorType(),
4681 d1c2dd75 Iustin Pop
      # we don't have job IDs
4682 d61df03e Iustin Pop
      }
4683 d61df03e Iustin Pop
4684 6286519f Iustin Pop
    i_list = [cfg.GetInstanceInfo(iname) for iname in cfg.GetInstanceList()]
4685 6286519f Iustin Pop
4686 d1c2dd75 Iustin Pop
    # node data
4687 d1c2dd75 Iustin Pop
    node_results = {}
4688 d1c2dd75 Iustin Pop
    node_list = cfg.GetNodeList()
4689 d1c2dd75 Iustin Pop
    node_data = rpc.call_node_info(node_list, cfg.GetVGName())
4690 d1c2dd75 Iustin Pop
    for nname in node_list:
4691 d1c2dd75 Iustin Pop
      ninfo = cfg.GetNodeInfo(nname)
4692 d1c2dd75 Iustin Pop
      if nname not in node_data or not isinstance(node_data[nname], dict):
4693 d1c2dd75 Iustin Pop
        raise errors.OpExecError("Can't get data for node %s" % nname)
4694 d1c2dd75 Iustin Pop
      remote_info = node_data[nname]
4695 b2662e7f Iustin Pop
      for attr in ['memory_total', 'memory_free', 'memory_dom0',
4696 4337cf1b Iustin Pop
                   'vg_size', 'vg_free', 'cpu_total']:
4697 d1c2dd75 Iustin Pop
        if attr not in remote_info:
4698 d1c2dd75 Iustin Pop
          raise errors.OpExecError("Node '%s' didn't return attribute '%s'" %
4699 d1c2dd75 Iustin Pop
                                   (nname, attr))
4700 d1c2dd75 Iustin Pop
        try:
4701 b2662e7f Iustin Pop
          remote_info[attr] = int(remote_info[attr])
4702 d1c2dd75 Iustin Pop
        except ValueError, err:
4703 d1c2dd75 Iustin Pop
          raise errors.OpExecError("Node '%s' returned invalid value for '%s':"
4704 d1c2dd75 Iustin Pop
                                   " %s" % (nname, attr, str(err)))
4705 6286519f Iustin Pop
      # compute memory used by primary instances
4706 6286519f Iustin Pop
      i_p_mem = i_p_up_mem = 0
4707 6286519f Iustin Pop
      for iinfo in i_list:
4708 6286519f Iustin Pop
        if iinfo.primary_node == nname:
4709 6286519f Iustin Pop
          i_p_mem += iinfo.memory
4710 6286519f Iustin Pop
          if iinfo.status == "up":
4711 6286519f Iustin Pop
            i_p_up_mem += iinfo.memory
4712 6286519f Iustin Pop
4713 b2662e7f Iustin Pop
      # build the per-node result dict expected by the allocator
4714 d1c2dd75 Iustin Pop
      pnr = {
4715 d1c2dd75 Iustin Pop
        "tags": list(ninfo.GetTags()),
4716 b2662e7f Iustin Pop
        "total_memory": remote_info['memory_total'],
4717 b2662e7f Iustin Pop
        "reserved_memory": remote_info['memory_dom0'],
4718 b2662e7f Iustin Pop
        "free_memory": remote_info['memory_free'],
4719 6286519f Iustin Pop
        "i_pri_memory": i_p_mem,
4720 6286519f Iustin Pop
        "i_pri_up_memory": i_p_up_mem,
4721 b2662e7f Iustin Pop
        "total_disk": remote_info['vg_size'],
4722 b2662e7f Iustin Pop
        "free_disk": remote_info['vg_free'],
4723 d1c2dd75 Iustin Pop
        "primary_ip": ninfo.primary_ip,
4724 d1c2dd75 Iustin Pop
        "secondary_ip": ninfo.secondary_ip,
4725 4337cf1b Iustin Pop
        "total_cpus": remote_info['cpu_total'],
4726 d1c2dd75 Iustin Pop
        }
4727 d1c2dd75 Iustin Pop
      node_results[nname] = pnr
4728 d1c2dd75 Iustin Pop
    data["nodes"] = node_results
4729 d1c2dd75 Iustin Pop
4730 d1c2dd75 Iustin Pop
    # instance data
4731 d1c2dd75 Iustin Pop
    instance_data = {}
4732 6286519f Iustin Pop
    for iinfo in i_list:
4733 d1c2dd75 Iustin Pop
      nic_data = [{"mac": n.mac, "ip": n.ip, "bridge": n.bridge}
4734 d1c2dd75 Iustin Pop
                  for n in iinfo.nics]
4735 d1c2dd75 Iustin Pop
      pir = {
4736 d1c2dd75 Iustin Pop
        "tags": list(iinfo.GetTags()),
4737 d1c2dd75 Iustin Pop
        "should_run": iinfo.status == "up",
4738 d1c2dd75 Iustin Pop
        "vcpus": iinfo.vcpus,
4739 d1c2dd75 Iustin Pop
        "memory": iinfo.memory,
4740 d1c2dd75 Iustin Pop
        "os": iinfo.os,
4741 d1c2dd75 Iustin Pop
        "nodes": [iinfo.primary_node] + list(iinfo.secondary_nodes),
4742 d1c2dd75 Iustin Pop
        "nics": nic_data,
4743 d1c2dd75 Iustin Pop
        "disks": [{"size": dsk.size, "mode": "w"} for dsk in iinfo.disks],
4744 d1c2dd75 Iustin Pop
        "disk_template": iinfo.disk_template,
4745 d1c2dd75 Iustin Pop
        }
4746 768f0a80 Iustin Pop
      instance_data[iinfo.name] = pir
4747 d61df03e Iustin Pop
4748 d1c2dd75 Iustin Pop
    data["instances"] = instance_data
4749 d61df03e Iustin Pop
4750 d1c2dd75 Iustin Pop
    self.in_data = data
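  # For reference, a single entry of the "nodes" dict built above looks
  # roughly as follows (all figures illustrative, memory/disk in MB):
  #
  #   "node1.example.com": {
  #     "tags": [],
  #     "total_memory": 4096, "reserved_memory": 512, "free_memory": 3072,
  #     "i_pri_memory": 1024, "i_pri_up_memory": 768,
  #     "total_disk": 102400, "free_disk": 65536,
  #     "primary_ip": "192.0.2.1", "secondary_ip": "198.51.100.1",
  #     "total_cpus": 4,
  #   }
  #
  # and a matching "instances" entry:
  #
  #   "instance1.example.com": {
  #     "tags": [], "should_run": True, "vcpus": 1, "memory": 512,
  #     "os": "debootstrap",
  #     "nodes": ["node1.example.com"],
  #     "nics": [{"mac": "aa:00:00:11:22:33", "ip": None,
  #               "bridge": "xen-br0"}],
  #     "disks": [{"size": 1024, "mode": "w"}, {"size": 4096, "mode": "w"}],
  #     "disk_template": "plain",
  #   }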
4751 d61df03e Iustin Pop
4752 d1c2dd75 Iustin Pop
  def _AddNewInstance(self):
4753 d1c2dd75 Iustin Pop
    """Add new instance data to allocator structure.
4754 d61df03e Iustin Pop

4755 d1c2dd75 Iustin Pop
    This in combination with _AllocatorGetClusterData will create the
4756 d1c2dd75 Iustin Pop
    correct structure needed as input for the allocator.
4757 d61df03e Iustin Pop

4758 d1c2dd75 Iustin Pop
    The checks for the completeness of the opcode must have already been
4759 d1c2dd75 Iustin Pop
    done.
4760 d61df03e Iustin Pop

4761 d1c2dd75 Iustin Pop
    """
4762 d1c2dd75 Iustin Pop
    data = self.in_data
4763 d1c2dd75 Iustin Pop
    if len(self.disks) != 2:
4764 d1c2dd75 Iustin Pop
      raise errors.OpExecError("Only two-disk configurations supported")
4765 d1c2dd75 Iustin Pop
4766 d1c2dd75 Iustin Pop
    disk_space = _ComputeDiskSize(self.disk_template,
4767 d1c2dd75 Iustin Pop
                                  self.disks[0]["size"], self.disks[1]["size"])
4768 d1c2dd75 Iustin Pop
4769 27579978 Iustin Pop
    if self.disk_template in constants.DTS_NET_MIRROR:
4770 27579978 Iustin Pop
      self.required_nodes = 2
4771 27579978 Iustin Pop
    else:
4772 27579978 Iustin Pop
      self.required_nodes = 1
4773 d1c2dd75 Iustin Pop
    request = {
4774 d1c2dd75 Iustin Pop
      "type": "allocate",
4775 d1c2dd75 Iustin Pop
      "name": self.name,
4776 d1c2dd75 Iustin Pop
      "disk_template": self.disk_template,
4777 d1c2dd75 Iustin Pop
      "tags": self.tags,
4778 d1c2dd75 Iustin Pop
      "os": self.os,
4779 d1c2dd75 Iustin Pop
      "vcpus": self.vcpus,
4780 d1c2dd75 Iustin Pop
      "memory": self.mem_size,
4781 d1c2dd75 Iustin Pop
      "disks": self.disks,
4782 d1c2dd75 Iustin Pop
      "disk_space_total": disk_space,
4783 d1c2dd75 Iustin Pop
      "nics": self.nics,
4784 27579978 Iustin Pop
      "required_nodes": self.required_nodes,
4785 d1c2dd75 Iustin Pop
      }
4786 d1c2dd75 Iustin Pop
    data["request"] = request
4787 298fe380 Iustin Pop
4788 d1c2dd75 Iustin Pop
  def _AddRelocateInstance(self):
4789 d1c2dd75 Iustin Pop
    """Add relocate instance data to allocator structure.
4790 298fe380 Iustin Pop

4791 d1c2dd75 Iustin Pop
    This in combination with _ComputeClusterData will create the
4792 d1c2dd75 Iustin Pop
    correct structure needed as input for the allocator.
4793 d61df03e Iustin Pop

4794 d1c2dd75 Iustin Pop
    The checks for the completeness of the opcode must have already been
4795 d1c2dd75 Iustin Pop
    done.
4796 d61df03e Iustin Pop

4797 d1c2dd75 Iustin Pop
    """
4798 27579978 Iustin Pop
    instance = self.cfg.GetInstanceInfo(self.name)
4799 27579978 Iustin Pop
    if instance is None:
4800 27579978 Iustin Pop
      raise errors.ProgrammerError("Unknown instance '%s' passed to"
4801 27579978 Iustin Pop
                                   " IAllocator" % self.name)
4802 27579978 Iustin Pop
4803 27579978 Iustin Pop
    if instance.disk_template not in constants.DTS_NET_MIRROR:
4804 27579978 Iustin Pop
      raise errors.OpPrereqError("Can't relocate non-mirrored instances")
4805 27579978 Iustin Pop
4806 2a139bb0 Iustin Pop
    if len(instance.secondary_nodes) != 1:
4807 2a139bb0 Iustin Pop
      raise errors.OpPrereqError("Instance does not have exactly one"
                                 " secondary node")
4808 2a139bb0 Iustin Pop
4809 27579978 Iustin Pop
    self.required_nodes = 1
4810 27579978 Iustin Pop
4811 27579978 Iustin Pop
    disk_space = _ComputeDiskSize(instance.disk_template,
4812 27579978 Iustin Pop
                                  instance.disks[0].size,
4813 27579978 Iustin Pop
                                  instance.disks[1].size)
4814 27579978 Iustin Pop
4815 d1c2dd75 Iustin Pop
    request = {
4816 2a139bb0 Iustin Pop
      "type": "relocate",
4817 d1c2dd75 Iustin Pop
      "name": self.name,
4818 27579978 Iustin Pop
      "disk_space_total": disk_space,
4819 27579978 Iustin Pop
      "required_nodes": self.required_nodes,
4820 29859cb7 Iustin Pop
      "relocate_from": self.relocate_from,
4821 d1c2dd75 Iustin Pop
      }
4822 27579978 Iustin Pop
    self.in_data["request"] = request
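  # The resulting relocation request is small; for a mirrored instance whose
  # secondary node is currently node2 it would look roughly like this
  # (sizes illustrative):
  #
  #   {"type": "relocate", "name": "instance1.example.com",
  #    "disk_space_total": 5376, "required_nodes": 1,
  #    "relocate_from": ["node2.example.com"]}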
4823 d61df03e Iustin Pop
4824 d1c2dd75 Iustin Pop
  def _BuildInputData(self):
4825 d1c2dd75 Iustin Pop
    """Build input data structures.
4826 d61df03e Iustin Pop

4827 d1c2dd75 Iustin Pop
    """
4828 d1c2dd75 Iustin Pop
    self._ComputeClusterData()
4829 d61df03e Iustin Pop
4830 d1c2dd75 Iustin Pop
    if self.mode == constants.IALLOCATOR_MODE_ALLOC:
4831 d1c2dd75 Iustin Pop
      self._AddNewInstance()
4832 d1c2dd75 Iustin Pop
    else:
4833 d1c2dd75 Iustin Pop
      self._AddRelocateInstance()
4834 d61df03e Iustin Pop
4835 d1c2dd75 Iustin Pop
    self.in_text = serializer.Dump(self.in_data)
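  # The serialized text handed to the allocator script therefore has the
  # following overall shape (contents abridged):
  #
  #   {
  #     "version": 1,
  #     "cluster_name": ...,
  #     "cluster_tags": [...],
  #     "hypervisor_type": ...,
  #     "nodes": {...},       # per-node dicts from _ComputeClusterData
  #     "instances": {...},   # per-instance dicts from _ComputeClusterData
  #     "request": {...},     # allocate or relocate request
  #   }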
4836 d61df03e Iustin Pop
4837 8d528b7c Iustin Pop
  def Run(self, name, validate=True, call_fn=rpc.call_iallocator_runner):
4838 d1c2dd75 Iustin Pop
    """Run an instance allocator and return the results.
4839 298fe380 Iustin Pop

4840 d1c2dd75 Iustin Pop
    """
4841 d1c2dd75 Iustin Pop
    data = self.in_text
4842 298fe380 Iustin Pop
4843 8d528b7c Iustin Pop
    result = call_fn(self.sstore.GetMasterNode(), name, self.in_text)
4844 298fe380 Iustin Pop
4845 8d528b7c Iustin Pop
    if not isinstance(result, tuple) or len(result) != 4:
4846 8d528b7c Iustin Pop
      raise errors.OpExecError("Invalid result from master iallocator runner")
4847 8d528b7c Iustin Pop
4848 8d528b7c Iustin Pop
    rcode, stdout, stderr, fail = result
4849 8d528b7c Iustin Pop
4850 8d528b7c Iustin Pop
    if rcode == constants.IARUN_NOTFOUND:
4851 8d528b7c Iustin Pop
      raise errors.OpExecError("Can't find allocator '%s'" % name)
4852 8d528b7c Iustin Pop
    elif rcode == constants.IARUN_FAILURE:
4853 d1c2dd75 Iustin Pop
      raise errors.OpExecError("Instance allocator call failed: %s,"
4854 d1c2dd75 Iustin Pop
                               " output: %s" %
4855 8d528b7c Iustin Pop
                               (fail, stdout+stderr))
4856 8d528b7c Iustin Pop
    self.out_text = stdout
4857 d1c2dd75 Iustin Pop
    if validate:
4858 d1c2dd75 Iustin Pop
      self._ValidateResult()
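  # A minimal sketch of driving Run() from an LU, assuming 'ial' was built
  # as above and "my_allocator" names a script installed on the master node
  # (both names are placeholders):
  #
  #   ial.Run("my_allocator")
  #   if not ial.success:
  #     raise errors.OpPrereqError("Can't compute nodes: %s" % ial.info)
  #   target_nodes = ial.nodes[:ial.required_nodes]
  #
  # The RPC result unpacked above is the tuple (rcode, stdout, stderr,
  # fail): rcode is one of the constants.IARUN_* values, stdout carries the
  # allocator's answer and fail the failure reason, if any.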
4859 298fe380 Iustin Pop
4860 d1c2dd75 Iustin Pop
  def _ValidateResult(self):
4861 d1c2dd75 Iustin Pop
    """Process the allocator results.
4862 538475ca Iustin Pop

4863 d1c2dd75 Iustin Pop
    This will process and, if successful, save the result in
4864 d1c2dd75 Iustin Pop
    self.out_data and the other result attributes (success, info, nodes).
4865 538475ca Iustin Pop

4866 d1c2dd75 Iustin Pop
    """
4867 d1c2dd75 Iustin Pop
    try:
4868 d1c2dd75 Iustin Pop
      rdict = serializer.Load(self.out_text)
4869 d1c2dd75 Iustin Pop
    except Exception, err:
4870 d1c2dd75 Iustin Pop
      raise errors.OpExecError("Can't parse iallocator results: %s" % str(err))
4871 d1c2dd75 Iustin Pop
4872 d1c2dd75 Iustin Pop
    if not isinstance(rdict, dict):
4873 d1c2dd75 Iustin Pop
      raise errors.OpExecError("Can't parse iallocator results: not a dict")
4874 538475ca Iustin Pop
4875 d1c2dd75 Iustin Pop
    for key in "success", "info", "nodes":
4876 d1c2dd75 Iustin Pop
      if key not in rdict:
4877 d1c2dd75 Iustin Pop
        raise errors.OpExecError("Can't parse iallocator results:"
4878 d1c2dd75 Iustin Pop
                                 " missing key '%s'" % key)
4879 d1c2dd75 Iustin Pop
      setattr(self, key, rdict[key])
4880 538475ca Iustin Pop
4881 d1c2dd75 Iustin Pop
    if not isinstance(rdict["nodes"], list):
4882 d1c2dd75 Iustin Pop
      raise errors.OpExecError("Can't parse iallocator results: 'nodes' key"
4883 d1c2dd75 Iustin Pop
                               " is not a list")
4884 d1c2dd75 Iustin Pop
    self.out_data = rdict
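  # A well-formed allocator answer therefore looks like this
  # (illustrative):
  #
  #   {"success": true, "info": "allocation successful",
  #    "nodes": ["node1.example.com", "node2.example.com"]}
  #
  # A refusal is reported with "success": false and the reason in "info";
  # keys other than the three checked above are currently ignored.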
4885 538475ca Iustin Pop
4886 538475ca Iustin Pop
4887 d61df03e Iustin Pop
class LUTestAllocator(NoHooksLU):
4888 d61df03e Iustin Pop
  """Run allocator tests.
4889 d61df03e Iustin Pop

4890 d61df03e Iustin Pop
  This LU runs the allocator tests.
4891 d61df03e Iustin Pop

4892 d61df03e Iustin Pop
  """
4893 d61df03e Iustin Pop
  _OP_REQP = ["direction", "mode", "name"]
4894 d61df03e Iustin Pop
4895 d61df03e Iustin Pop
  def CheckPrereq(self):
4896 d61df03e Iustin Pop
    """Check prerequisites.
4897 d61df03e Iustin Pop

4898 d61df03e Iustin Pop
    This checks the opcode parameters depending on the test direction and mode.
4899 d61df03e Iustin Pop

4900 d61df03e Iustin Pop
    """
4901 298fe380 Iustin Pop
    if self.op.mode == constants.IALLOCATOR_MODE_ALLOC:
4902 d61df03e Iustin Pop
      for attr in ["name", "mem_size", "disks", "disk_template",
4903 d61df03e Iustin Pop
                   "os", "tags", "nics", "vcpus"]:
4904 d61df03e Iustin Pop
        if not hasattr(self.op, attr):
4905 d61df03e Iustin Pop
          raise errors.OpPrereqError("Missing attribute '%s' on opcode input" %
4906 d61df03e Iustin Pop
                                     attr)
4907 d61df03e Iustin Pop
      iname = self.cfg.ExpandInstanceName(self.op.name)
4908 d61df03e Iustin Pop
      if iname is not None:
4909 d61df03e Iustin Pop
        raise errors.OpPrereqError("Instance '%s' already in the cluster" %
4910 d61df03e Iustin Pop
                                   iname)
4911 d61df03e Iustin Pop
      if not isinstance(self.op.nics, list):
4912 d61df03e Iustin Pop
        raise errors.OpPrereqError("Invalid parameter 'nics'")
4913 d61df03e Iustin Pop
      for row in self.op.nics:
4914 d61df03e Iustin Pop
        if (not isinstance(row, dict) or
4915 d61df03e Iustin Pop
            "mac" not in row or
4916 d61df03e Iustin Pop
            "ip" not in row or
4917 d61df03e Iustin Pop
            "bridge" not in row):
4918 d61df03e Iustin Pop
          raise errors.OpPrereqError("Invalid contents of the"
4919 d61df03e Iustin Pop
                                     " 'nics' parameter")
4920 d61df03e Iustin Pop
      if not isinstance(self.op.disks, list):
4921 d61df03e Iustin Pop
        raise errors.OpPrereqError("Invalid parameter 'disks'")
4922 298fe380 Iustin Pop
      if len(self.op.disks) != 2:
4923 298fe380 Iustin Pop
        raise errors.OpPrereqError("Only two-disk configurations supported")
4924 d61df03e Iustin Pop
      for row in self.op.disks:
4925 d61df03e Iustin Pop
        if (not isinstance(row, dict) or
4926 d61df03e Iustin Pop
            "size" not in row or
4927 d61df03e Iustin Pop
            not isinstance(row["size"], int) or
4928 d61df03e Iustin Pop
            "mode" not in row or
4929 d61df03e Iustin Pop
            row["mode"] not in ['r', 'w']):
4930 d61df03e Iustin Pop
          raise errors.OpPrereqError("Invalid contents of the"
4931 d61df03e Iustin Pop
                                     " 'disks' parameter")
4932 298fe380 Iustin Pop
    elif self.op.mode == constants.IALLOCATOR_MODE_RELOC:
4933 d61df03e Iustin Pop
      if not hasattr(self.op, "name"):
4934 d61df03e Iustin Pop
        raise errors.OpPrereqError("Missing attribute 'name' on opcode input")
4935 d61df03e Iustin Pop
      fname = self.cfg.ExpandInstanceName(self.op.name)
4936 d61df03e Iustin Pop
      if fname is None:
4937 d61df03e Iustin Pop
        raise errors.OpPrereqError("Instance '%s' not found for relocation" %
4938 d61df03e Iustin Pop
                                   self.op.name)
4939 d61df03e Iustin Pop
      self.op.name = fname
4940 29859cb7 Iustin Pop
      self.relocate_from = self.cfg.GetInstanceInfo(fname).secondary_nodes
4941 d61df03e Iustin Pop
    else:
4942 d61df03e Iustin Pop
      raise errors.OpPrereqError("Invalid test allocator mode '%s'" %
4943 d61df03e Iustin Pop
                                 self.op.mode)
4944 d61df03e Iustin Pop
4945 298fe380 Iustin Pop
    if self.op.direction == constants.IALLOCATOR_DIR_OUT:
4946 298fe380 Iustin Pop
      if not hasattr(self.op, "allocator") or self.op.allocator is None:
4947 d61df03e Iustin Pop
        raise errors.OpPrereqError("Missing allocator name")
4948 298fe380 Iustin Pop
    elif self.op.direction != constants.IALLOCATOR_DIR_IN:
4949 d61df03e Iustin Pop
      raise errors.OpPrereqError("Wrong allocator test '%s'" %
4950 d61df03e Iustin Pop
                                 self.op.direction)
4951 d61df03e Iustin Pop
4952 d61df03e Iustin Pop
  def Exec(self, feedback_fn):
4953 d61df03e Iustin Pop
    """Run the allocator test.
4954 d61df03e Iustin Pop

4955 d61df03e Iustin Pop
    """
4956 29859cb7 Iustin Pop
    if self.op.mode == constants.IALLOCATOR_MODE_ALLOC:
4957 29859cb7 Iustin Pop
      ial = IAllocator(self.cfg, self.sstore,
4958 29859cb7 Iustin Pop
                       mode=self.op.mode,
4959 29859cb7 Iustin Pop
                       name=self.op.name,
4960 29859cb7 Iustin Pop
                       mem_size=self.op.mem_size,
4961 29859cb7 Iustin Pop
                       disks=self.op.disks,
4962 29859cb7 Iustin Pop
                       disk_template=self.op.disk_template,
4963 29859cb7 Iustin Pop
                       os=self.op.os,
4964 29859cb7 Iustin Pop
                       tags=self.op.tags,
4965 29859cb7 Iustin Pop
                       nics=self.op.nics,
4966 29859cb7 Iustin Pop
                       vcpus=self.op.vcpus,
4967 29859cb7 Iustin Pop
                       )
4968 29859cb7 Iustin Pop
    else:
4969 29859cb7 Iustin Pop
      ial = IAllocator(self.cfg, self.sstore,
4970 29859cb7 Iustin Pop
                       mode=self.op.mode,
4971 29859cb7 Iustin Pop
                       name=self.op.name,
4972 29859cb7 Iustin Pop
                       relocate_from=list(self.relocate_from),
4973 29859cb7 Iustin Pop
                       )
4974 d61df03e Iustin Pop
4975 298fe380 Iustin Pop
    if self.op.direction == constants.IALLOCATOR_DIR_IN:
4976 d1c2dd75 Iustin Pop
      result = ial.in_text
4977 298fe380 Iustin Pop
    else:
4978 d1c2dd75 Iustin Pop
      ial.Run(self.op.allocator, validate=False)
4979 d1c2dd75 Iustin Pop
      result = ial.out_text
4980 298fe380 Iustin Pop
    return result
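# A hedged usage sketch for this LU; OpTestAllocator is assumed to carry
# exactly the attributes checked in CheckPrereq above, and all concrete
# values are illustrative:
#
#   op = opcodes.OpTestAllocator(direction=constants.IALLOCATOR_DIR_IN,
#                                mode=constants.IALLOCATOR_MODE_ALLOC,
#                                name="instance1.example.com",
#                                mem_size=512,
#                                disks=[{"size": 1024, "mode": "w"},
#                                       {"size": 4096, "mode": "w"}],
#                                disk_template=constants.DT_PLAIN,
#                                os="debootstrap", tags=[], vcpus=1,
#                                nics=[{"mac": "auto", "ip": None,
#                                       "bridge": "xen-br0"}])
#
# With direction IALLOCATOR_DIR_IN the LU only returns the generated input
# text; with IALLOCATOR_DIR_OUT an "allocator" attribute naming the script
# to run must also be set, and the raw allocator output is returned.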