Statistics
| Branch: | Tag: | Revision:

root / lib / cmdlib.py @ 42a999d1

History | View | Annotate | Download (167.7 kB)

1 2f31098c Iustin Pop
#
2 a8083063 Iustin Pop
#
3 a8083063 Iustin Pop
4 e7c6e02b Michael Hanselmann
# Copyright (C) 2006, 2007, 2008 Google Inc.
5 a8083063 Iustin Pop
#
6 a8083063 Iustin Pop
# This program is free software; you can redistribute it and/or modify
7 a8083063 Iustin Pop
# it under the terms of the GNU General Public License as published by
8 a8083063 Iustin Pop
# the Free Software Foundation; either version 2 of the License, or
9 a8083063 Iustin Pop
# (at your option) any later version.
10 a8083063 Iustin Pop
#
11 a8083063 Iustin Pop
# This program is distributed in the hope that it will be useful, but
12 a8083063 Iustin Pop
# WITHOUT ANY WARRANTY; without even the implied warranty of
13 a8083063 Iustin Pop
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
14 a8083063 Iustin Pop
# General Public License for more details.
15 a8083063 Iustin Pop
#
16 a8083063 Iustin Pop
# You should have received a copy of the GNU General Public License
17 a8083063 Iustin Pop
# along with this program; if not, write to the Free Software
18 a8083063 Iustin Pop
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
19 a8083063 Iustin Pop
# 02110-1301, USA.
20 a8083063 Iustin Pop
21 a8083063 Iustin Pop
22 880478f8 Iustin Pop
"""Module implementing the master-side code."""
23 a8083063 Iustin Pop
24 a8083063 Iustin Pop
# pylint: disable-msg=W0613,W0201
25 a8083063 Iustin Pop
26 a8083063 Iustin Pop
import os
27 a8083063 Iustin Pop
import os.path
28 a8083063 Iustin Pop
import sha
29 a8083063 Iustin Pop
import time
30 a8083063 Iustin Pop
import tempfile
31 a8083063 Iustin Pop
import re
32 a8083063 Iustin Pop
import platform
33 a8083063 Iustin Pop
34 a8083063 Iustin Pop
from ganeti import rpc
35 a8083063 Iustin Pop
from ganeti import ssh
36 a8083063 Iustin Pop
from ganeti import logger
37 a8083063 Iustin Pop
from ganeti import utils
38 a8083063 Iustin Pop
from ganeti import errors
39 a8083063 Iustin Pop
from ganeti import hypervisor
40 a8083063 Iustin Pop
from ganeti import config
41 a8083063 Iustin Pop
from ganeti import constants
42 a8083063 Iustin Pop
from ganeti import objects
43 a8083063 Iustin Pop
from ganeti import opcodes
44 a8083063 Iustin Pop
from ganeti import ssconf
45 8d14b30d Iustin Pop
from ganeti import serializer
46 d61df03e Iustin Pop
47 d61df03e Iustin Pop
48 a8083063 Iustin Pop
class LogicalUnit(object):
49 396e1b78 Michael Hanselmann
  """Logical Unit base class.
50 a8083063 Iustin Pop

51 a8083063 Iustin Pop
  Subclasses must follow these rules:
52 a8083063 Iustin Pop
    - implement CheckPrereq which also fills in the opcode instance
53 a8083063 Iustin Pop
      with all the fields (even if as None)
54 a8083063 Iustin Pop
    - implement Exec
55 a8083063 Iustin Pop
    - implement BuildHooksEnv
56 a8083063 Iustin Pop
    - redefine HPATH and HTYPE
57 05f86716 Guido Trotter
    - optionally redefine their run requirements:
58 05f86716 Guido Trotter
        REQ_MASTER: the LU needs to run on the master node
59 05f86716 Guido Trotter
        REQ_WSSTORE: the LU needs a writable SimpleStore
60 7e55040e Guido Trotter
        REQ_BGL: the LU needs to hold the Big Ganeti Lock exclusively
61 05f86716 Guido Trotter

62 05f86716 Guido Trotter
  Note that all commands require root permissions.
63 a8083063 Iustin Pop

64 a8083063 Iustin Pop
  """
65 a8083063 Iustin Pop
  HPATH = None
66 a8083063 Iustin Pop
  HTYPE = None
67 a8083063 Iustin Pop
  _OP_REQP = []
68 a8083063 Iustin Pop
  REQ_MASTER = True
69 05f86716 Guido Trotter
  REQ_WSSTORE = False
70 7e55040e Guido Trotter
  REQ_BGL = True
71 a8083063 Iustin Pop
72 a8083063 Iustin Pop
  def __init__(self, processor, op, cfg, sstore):
73 a8083063 Iustin Pop
    """Constructor for LogicalUnit.
74 a8083063 Iustin Pop

75 a8083063 Iustin Pop
    This needs to be overriden in derived classes in order to check op
76 a8083063 Iustin Pop
    validity.
77 a8083063 Iustin Pop

78 a8083063 Iustin Pop
    """
79 5bfac263 Iustin Pop
    self.proc = processor
80 a8083063 Iustin Pop
    self.op = op
81 a8083063 Iustin Pop
    self.cfg = cfg
82 a8083063 Iustin Pop
    self.sstore = sstore
83 c92b310a Michael Hanselmann
    self.__ssh = None
84 c92b310a Michael Hanselmann
85 a8083063 Iustin Pop
    for attr_name in self._OP_REQP:
86 a8083063 Iustin Pop
      attr_val = getattr(op, attr_name, None)
87 a8083063 Iustin Pop
      if attr_val is None:
88 3ecf6786 Iustin Pop
        raise errors.OpPrereqError("Required parameter '%s' missing" %
89 3ecf6786 Iustin Pop
                                   attr_name)
90 c6d58a2b Michael Hanselmann
91 c6d58a2b Michael Hanselmann
    if not cfg.IsCluster():
92 c6d58a2b Michael Hanselmann
      raise errors.OpPrereqError("Cluster not initialized yet,"
93 c6d58a2b Michael Hanselmann
                                 " use 'gnt-cluster init' first.")
94 c6d58a2b Michael Hanselmann
    if self.REQ_MASTER:
95 c6d58a2b Michael Hanselmann
      master = sstore.GetMasterNode()
96 c6d58a2b Michael Hanselmann
      if master != utils.HostInfo().name:
97 c6d58a2b Michael Hanselmann
        raise errors.OpPrereqError("Commands must be run on the master"
98 c6d58a2b Michael Hanselmann
                                   " node %s" % master)
99 a8083063 Iustin Pop
100 c92b310a Michael Hanselmann
  def __GetSSH(self):
101 c92b310a Michael Hanselmann
    """Returns the SshRunner object
102 c92b310a Michael Hanselmann

103 c92b310a Michael Hanselmann
    """
104 c92b310a Michael Hanselmann
    if not self.__ssh:
105 1ff08570 Michael Hanselmann
      self.__ssh = ssh.SshRunner(self.sstore)
106 c92b310a Michael Hanselmann
    return self.__ssh
107 c92b310a Michael Hanselmann
108 c92b310a Michael Hanselmann
  ssh = property(fget=__GetSSH)
109 c92b310a Michael Hanselmann
110 a8083063 Iustin Pop
  def CheckPrereq(self):
111 a8083063 Iustin Pop
    """Check prerequisites for this LU.
112 a8083063 Iustin Pop

113 a8083063 Iustin Pop
    This method should check that the prerequisites for the execution
114 a8083063 Iustin Pop
    of this LU are fulfilled. It can do internode communication, but
115 a8083063 Iustin Pop
    it should be idempotent - no cluster or system changes are
116 a8083063 Iustin Pop
    allowed.
117 a8083063 Iustin Pop

118 a8083063 Iustin Pop
    The method should raise errors.OpPrereqError in case something is
119 a8083063 Iustin Pop
    not fulfilled. Its return value is ignored.
120 a8083063 Iustin Pop

121 a8083063 Iustin Pop
    This method should also update all the parameters of the opcode to
122 a8083063 Iustin Pop
    their canonical form; e.g. a short node name must be fully
123 a8083063 Iustin Pop
    expanded after this method has successfully completed (so that
124 a8083063 Iustin Pop
    hooks, logging, etc. work correctly).
125 a8083063 Iustin Pop

126 a8083063 Iustin Pop
    """
127 a8083063 Iustin Pop
    raise NotImplementedError
128 a8083063 Iustin Pop
129 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
130 a8083063 Iustin Pop
    """Execute the LU.
131 a8083063 Iustin Pop

132 a8083063 Iustin Pop
    This method should implement the actual work. It should raise
133 a8083063 Iustin Pop
    errors.OpExecError for failures that are somewhat dealt with in
134 a8083063 Iustin Pop
    code, or expected.
135 a8083063 Iustin Pop

136 a8083063 Iustin Pop
    """
137 a8083063 Iustin Pop
    raise NotImplementedError
138 a8083063 Iustin Pop
139 a8083063 Iustin Pop
  def BuildHooksEnv(self):
140 a8083063 Iustin Pop
    """Build hooks environment for this LU.
141 a8083063 Iustin Pop

142 a8083063 Iustin Pop
    This method should return a three-node tuple consisting of: a dict
143 a8083063 Iustin Pop
    containing the environment that will be used for running the
144 a8083063 Iustin Pop
    specific hook for this LU, a list of node names on which the hook
145 a8083063 Iustin Pop
    should run before the execution, and a list of node names on which
146 a8083063 Iustin Pop
    the hook should run after the execution.
147 a8083063 Iustin Pop

148 a8083063 Iustin Pop
    The keys of the dict must not have 'GANETI_' prefixed as this will
149 a8083063 Iustin Pop
    be handled in the hooks runner. Also note additional keys will be
150 a8083063 Iustin Pop
    added by the hooks runner. If the LU doesn't define any
151 a8083063 Iustin Pop
    environment, an empty dict (and not None) should be returned.
152 a8083063 Iustin Pop

153 8a3fe350 Guido Trotter
    No nodes should be returned as an empty list (and not None).
154 a8083063 Iustin Pop

155 a8083063 Iustin Pop
    Note that if the HPATH for a LU class is None, this function will
156 a8083063 Iustin Pop
    not be called.
157 a8083063 Iustin Pop

158 a8083063 Iustin Pop
    """
159 a8083063 Iustin Pop
    raise NotImplementedError
160 a8083063 Iustin Pop
161 1fce5219 Guido Trotter
  def HooksCallBack(self, phase, hook_results, feedback_fn, lu_result):
162 1fce5219 Guido Trotter
    """Notify the LU about the results of its hooks.
163 1fce5219 Guido Trotter

164 1fce5219 Guido Trotter
    This method is called every time a hooks phase is executed, and notifies
165 1fce5219 Guido Trotter
    the Logical Unit about the hooks' result. The LU can then use it to alter
166 1fce5219 Guido Trotter
    its result based on the hooks.  By default the method does nothing and the
167 1fce5219 Guido Trotter
    previous result is passed back unchanged but any LU can define it if it
168 1fce5219 Guido Trotter
    wants to use the local cluster hook-scripts somehow.
169 1fce5219 Guido Trotter

170 1fce5219 Guido Trotter
    Args:
171 1fce5219 Guido Trotter
      phase: the hooks phase that has just been run
172 1fce5219 Guido Trotter
      hooks_results: the results of the multi-node hooks rpc call
173 1fce5219 Guido Trotter
      feedback_fn: function to send feedback back to the caller
174 1fce5219 Guido Trotter
      lu_result: the previous result this LU had, or None in the PRE phase.
175 1fce5219 Guido Trotter

176 1fce5219 Guido Trotter
    """
177 1fce5219 Guido Trotter
    return lu_result
178 1fce5219 Guido Trotter
179 a8083063 Iustin Pop
180 a8083063 Iustin Pop
class NoHooksLU(LogicalUnit):
181 a8083063 Iustin Pop
  """Simple LU which runs no hooks.
182 a8083063 Iustin Pop

183 a8083063 Iustin Pop
  This LU is intended as a parent for other LogicalUnits which will
184 a8083063 Iustin Pop
  run no hooks, in order to reduce duplicate code.
185 a8083063 Iustin Pop

186 a8083063 Iustin Pop
  """
187 a8083063 Iustin Pop
  HPATH = None
188 a8083063 Iustin Pop
  HTYPE = None
189 a8083063 Iustin Pop
190 a8083063 Iustin Pop
191 dcb93971 Michael Hanselmann
def _GetWantedNodes(lu, nodes):
192 a7ba5e53 Iustin Pop
  """Returns list of checked and expanded node names.
193 83120a01 Michael Hanselmann

194 83120a01 Michael Hanselmann
  Args:
195 83120a01 Michael Hanselmann
    nodes: List of nodes (strings) or None for all
196 83120a01 Michael Hanselmann

197 83120a01 Michael Hanselmann
  """
198 3312b702 Iustin Pop
  if not isinstance(nodes, list):
199 3ecf6786 Iustin Pop
    raise errors.OpPrereqError("Invalid argument type 'nodes'")
200 dcb93971 Michael Hanselmann
201 dcb93971 Michael Hanselmann
  if nodes:
202 3312b702 Iustin Pop
    wanted = []
203 dcb93971 Michael Hanselmann
204 dcb93971 Michael Hanselmann
    for name in nodes:
205 a7ba5e53 Iustin Pop
      node = lu.cfg.ExpandNodeName(name)
206 dcb93971 Michael Hanselmann
      if node is None:
207 3ecf6786 Iustin Pop
        raise errors.OpPrereqError("No such node name '%s'" % name)
208 3312b702 Iustin Pop
      wanted.append(node)
209 dcb93971 Michael Hanselmann
210 dcb93971 Michael Hanselmann
  else:
211 a7ba5e53 Iustin Pop
    wanted = lu.cfg.GetNodeList()
212 a7ba5e53 Iustin Pop
  return utils.NiceSort(wanted)
213 3312b702 Iustin Pop
214 3312b702 Iustin Pop
215 3312b702 Iustin Pop
def _GetWantedInstances(lu, instances):
216 a7ba5e53 Iustin Pop
  """Returns list of checked and expanded instance names.
217 3312b702 Iustin Pop

218 3312b702 Iustin Pop
  Args:
219 3312b702 Iustin Pop
    instances: List of instances (strings) or None for all
220 3312b702 Iustin Pop

221 3312b702 Iustin Pop
  """
222 3312b702 Iustin Pop
  if not isinstance(instances, list):
223 3312b702 Iustin Pop
    raise errors.OpPrereqError("Invalid argument type 'instances'")
224 3312b702 Iustin Pop
225 3312b702 Iustin Pop
  if instances:
226 3312b702 Iustin Pop
    wanted = []
227 3312b702 Iustin Pop
228 3312b702 Iustin Pop
    for name in instances:
229 a7ba5e53 Iustin Pop
      instance = lu.cfg.ExpandInstanceName(name)
230 3312b702 Iustin Pop
      if instance is None:
231 3312b702 Iustin Pop
        raise errors.OpPrereqError("No such instance name '%s'" % name)
232 3312b702 Iustin Pop
      wanted.append(instance)
233 3312b702 Iustin Pop
234 3312b702 Iustin Pop
  else:
235 a7ba5e53 Iustin Pop
    wanted = lu.cfg.GetInstanceList()
236 a7ba5e53 Iustin Pop
  return utils.NiceSort(wanted)
237 dcb93971 Michael Hanselmann
238 dcb93971 Michael Hanselmann
239 dcb93971 Michael Hanselmann
def _CheckOutputFields(static, dynamic, selected):
240 83120a01 Michael Hanselmann
  """Checks whether all selected fields are valid.
241 83120a01 Michael Hanselmann

242 83120a01 Michael Hanselmann
  Args:
243 83120a01 Michael Hanselmann
    static: Static fields
244 83120a01 Michael Hanselmann
    dynamic: Dynamic fields
245 83120a01 Michael Hanselmann

246 83120a01 Michael Hanselmann
  """
247 83120a01 Michael Hanselmann
  static_fields = frozenset(static)
248 83120a01 Michael Hanselmann
  dynamic_fields = frozenset(dynamic)
249 dcb93971 Michael Hanselmann
250 83120a01 Michael Hanselmann
  all_fields = static_fields | dynamic_fields
251 dcb93971 Michael Hanselmann
252 83120a01 Michael Hanselmann
  if not all_fields.issuperset(selected):
253 3ecf6786 Iustin Pop
    raise errors.OpPrereqError("Unknown output fields selected: %s"
254 3ecf6786 Iustin Pop
                               % ",".join(frozenset(selected).
255 3ecf6786 Iustin Pop
                                          difference(all_fields)))
256 dcb93971 Michael Hanselmann
257 dcb93971 Michael Hanselmann
258 ecb215b5 Michael Hanselmann
def _BuildInstanceHookEnv(name, primary_node, secondary_nodes, os_type, status,
259 396e1b78 Michael Hanselmann
                          memory, vcpus, nics):
260 ecb215b5 Michael Hanselmann
  """Builds instance related env variables for hooks from single variables.
261 ecb215b5 Michael Hanselmann

262 ecb215b5 Michael Hanselmann
  Args:
263 ecb215b5 Michael Hanselmann
    secondary_nodes: List of secondary nodes as strings
264 396e1b78 Michael Hanselmann
  """
265 396e1b78 Michael Hanselmann
  env = {
266 0e137c28 Iustin Pop
    "OP_TARGET": name,
267 396e1b78 Michael Hanselmann
    "INSTANCE_NAME": name,
268 396e1b78 Michael Hanselmann
    "INSTANCE_PRIMARY": primary_node,
269 396e1b78 Michael Hanselmann
    "INSTANCE_SECONDARIES": " ".join(secondary_nodes),
270 ecb215b5 Michael Hanselmann
    "INSTANCE_OS_TYPE": os_type,
271 396e1b78 Michael Hanselmann
    "INSTANCE_STATUS": status,
272 396e1b78 Michael Hanselmann
    "INSTANCE_MEMORY": memory,
273 396e1b78 Michael Hanselmann
    "INSTANCE_VCPUS": vcpus,
274 396e1b78 Michael Hanselmann
  }
275 396e1b78 Michael Hanselmann
276 396e1b78 Michael Hanselmann
  if nics:
277 396e1b78 Michael Hanselmann
    nic_count = len(nics)
278 53e4e875 Guido Trotter
    for idx, (ip, bridge, mac) in enumerate(nics):
279 396e1b78 Michael Hanselmann
      if ip is None:
280 396e1b78 Michael Hanselmann
        ip = ""
281 396e1b78 Michael Hanselmann
      env["INSTANCE_NIC%d_IP" % idx] = ip
282 396e1b78 Michael Hanselmann
      env["INSTANCE_NIC%d_BRIDGE" % idx] = bridge
283 53e4e875 Guido Trotter
      env["INSTANCE_NIC%d_HWADDR" % idx] = mac
284 396e1b78 Michael Hanselmann
  else:
285 396e1b78 Michael Hanselmann
    nic_count = 0
286 396e1b78 Michael Hanselmann
287 396e1b78 Michael Hanselmann
  env["INSTANCE_NIC_COUNT"] = nic_count
288 396e1b78 Michael Hanselmann
289 396e1b78 Michael Hanselmann
  return env
290 396e1b78 Michael Hanselmann
291 396e1b78 Michael Hanselmann
292 396e1b78 Michael Hanselmann
def _BuildInstanceHookEnvByObject(instance, override=None):
293 ecb215b5 Michael Hanselmann
  """Builds instance related env variables for hooks from an object.
294 ecb215b5 Michael Hanselmann

295 ecb215b5 Michael Hanselmann
  Args:
296 ecb215b5 Michael Hanselmann
    instance: objects.Instance object of instance
297 ecb215b5 Michael Hanselmann
    override: dict of values to override
298 ecb215b5 Michael Hanselmann
  """
299 396e1b78 Michael Hanselmann
  args = {
300 396e1b78 Michael Hanselmann
    'name': instance.name,
301 396e1b78 Michael Hanselmann
    'primary_node': instance.primary_node,
302 396e1b78 Michael Hanselmann
    'secondary_nodes': instance.secondary_nodes,
303 ecb215b5 Michael Hanselmann
    'os_type': instance.os,
304 396e1b78 Michael Hanselmann
    'status': instance.os,
305 396e1b78 Michael Hanselmann
    'memory': instance.memory,
306 396e1b78 Michael Hanselmann
    'vcpus': instance.vcpus,
307 53e4e875 Guido Trotter
    'nics': [(nic.ip, nic.bridge, nic.mac) for nic in instance.nics],
308 396e1b78 Michael Hanselmann
  }
309 396e1b78 Michael Hanselmann
  if override:
310 396e1b78 Michael Hanselmann
    args.update(override)
311 396e1b78 Michael Hanselmann
  return _BuildInstanceHookEnv(**args)
312 396e1b78 Michael Hanselmann
313 396e1b78 Michael Hanselmann
314 bf6929a2 Alexander Schreiber
def _CheckInstanceBridgesExist(instance):
315 bf6929a2 Alexander Schreiber
  """Check that the brigdes needed by an instance exist.
316 bf6929a2 Alexander Schreiber

317 bf6929a2 Alexander Schreiber
  """
318 bf6929a2 Alexander Schreiber
  # check bridges existance
319 bf6929a2 Alexander Schreiber
  brlist = [nic.bridge for nic in instance.nics]
320 bf6929a2 Alexander Schreiber
  if not rpc.call_bridges_exist(instance.primary_node, brlist):
321 bf6929a2 Alexander Schreiber
    raise errors.OpPrereqError("one or more target bridges %s does not"
322 bf6929a2 Alexander Schreiber
                               " exist on destination node '%s'" %
323 bf6929a2 Alexander Schreiber
                               (brlist, instance.primary_node))
324 bf6929a2 Alexander Schreiber
325 bf6929a2 Alexander Schreiber
326 a8083063 Iustin Pop
class LUDestroyCluster(NoHooksLU):
327 a8083063 Iustin Pop
  """Logical unit for destroying the cluster.
328 a8083063 Iustin Pop

329 a8083063 Iustin Pop
  """
330 a8083063 Iustin Pop
  _OP_REQP = []
331 a8083063 Iustin Pop
332 a8083063 Iustin Pop
  def CheckPrereq(self):
333 a8083063 Iustin Pop
    """Check prerequisites.
334 a8083063 Iustin Pop

335 a8083063 Iustin Pop
    This checks whether the cluster is empty.
336 a8083063 Iustin Pop

337 a8083063 Iustin Pop
    Any errors are signalled by raising errors.OpPrereqError.
338 a8083063 Iustin Pop

339 a8083063 Iustin Pop
    """
340 880478f8 Iustin Pop
    master = self.sstore.GetMasterNode()
341 a8083063 Iustin Pop
342 a8083063 Iustin Pop
    nodelist = self.cfg.GetNodeList()
343 db915bd1 Michael Hanselmann
    if len(nodelist) != 1 or nodelist[0] != master:
344 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("There are still %d node(s) in"
345 3ecf6786 Iustin Pop
                                 " this cluster." % (len(nodelist) - 1))
346 db915bd1 Michael Hanselmann
    instancelist = self.cfg.GetInstanceList()
347 db915bd1 Michael Hanselmann
    if instancelist:
348 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("There are still %d instance(s) in"
349 3ecf6786 Iustin Pop
                                 " this cluster." % len(instancelist))
350 a8083063 Iustin Pop
351 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
352 a8083063 Iustin Pop
    """Destroys the cluster.
353 a8083063 Iustin Pop

354 a8083063 Iustin Pop
    """
355 c8a0948f Michael Hanselmann
    master = self.sstore.GetMasterNode()
356 c9064964 Iustin Pop
    if not rpc.call_node_stop_master(master):
357 c9064964 Iustin Pop
      raise errors.OpExecError("Could not disable the master role")
358 70d9e3d8 Iustin Pop
    priv_key, pub_key, _ = ssh.GetUserFiles(constants.GANETI_RUNAS)
359 70d9e3d8 Iustin Pop
    utils.CreateBackup(priv_key)
360 70d9e3d8 Iustin Pop
    utils.CreateBackup(pub_key)
361 c8a0948f Michael Hanselmann
    rpc.call_node_leave_cluster(master)
362 a8083063 Iustin Pop
363 a8083063 Iustin Pop
364 d8fff41c Guido Trotter
class LUVerifyCluster(LogicalUnit):
365 a8083063 Iustin Pop
  """Verifies the cluster status.
366 a8083063 Iustin Pop

367 a8083063 Iustin Pop
  """
368 d8fff41c Guido Trotter
  HPATH = "cluster-verify"
369 d8fff41c Guido Trotter
  HTYPE = constants.HTYPE_CLUSTER
370 e54c4c5e Guido Trotter
  _OP_REQP = ["skip_checks"]
371 a8083063 Iustin Pop
372 a8083063 Iustin Pop
  def _VerifyNode(self, node, file_list, local_cksum, vglist, node_result,
373 a8083063 Iustin Pop
                  remote_version, feedback_fn):
374 a8083063 Iustin Pop
    """Run multiple tests against a node.
375 a8083063 Iustin Pop

376 a8083063 Iustin Pop
    Test list:
377 a8083063 Iustin Pop
      - compares ganeti version
378 a8083063 Iustin Pop
      - checks vg existance and size > 20G
379 a8083063 Iustin Pop
      - checks config file checksum
380 a8083063 Iustin Pop
      - checks ssh to other nodes
381 a8083063 Iustin Pop

382 a8083063 Iustin Pop
    Args:
383 a8083063 Iustin Pop
      node: name of the node to check
384 a8083063 Iustin Pop
      file_list: required list of files
385 a8083063 Iustin Pop
      local_cksum: dictionary of local files and their checksums
386 098c0958 Michael Hanselmann

387 a8083063 Iustin Pop
    """
388 a8083063 Iustin Pop
    # compares ganeti version
389 a8083063 Iustin Pop
    local_version = constants.PROTOCOL_VERSION
390 a8083063 Iustin Pop
    if not remote_version:
391 c840ae6f Guido Trotter
      feedback_fn("  - ERROR: connection to %s failed" % (node))
392 a8083063 Iustin Pop
      return True
393 a8083063 Iustin Pop
394 a8083063 Iustin Pop
    if local_version != remote_version:
395 a8083063 Iustin Pop
      feedback_fn("  - ERROR: sw version mismatch: master %s, node(%s) %s" %
396 a8083063 Iustin Pop
                      (local_version, node, remote_version))
397 a8083063 Iustin Pop
      return True
398 a8083063 Iustin Pop
399 a8083063 Iustin Pop
    # checks vg existance and size > 20G
400 a8083063 Iustin Pop
401 a8083063 Iustin Pop
    bad = False
402 a8083063 Iustin Pop
    if not vglist:
403 a8083063 Iustin Pop
      feedback_fn("  - ERROR: unable to check volume groups on node %s." %
404 a8083063 Iustin Pop
                      (node,))
405 a8083063 Iustin Pop
      bad = True
406 a8083063 Iustin Pop
    else:
407 8d1a2a64 Michael Hanselmann
      vgstatus = utils.CheckVolumeGroupSize(vglist, self.cfg.GetVGName(),
408 8d1a2a64 Michael Hanselmann
                                            constants.MIN_VG_SIZE)
409 a8083063 Iustin Pop
      if vgstatus:
410 a8083063 Iustin Pop
        feedback_fn("  - ERROR: %s on node %s" % (vgstatus, node))
411 a8083063 Iustin Pop
        bad = True
412 a8083063 Iustin Pop
413 a8083063 Iustin Pop
    # checks config file checksum
414 a8083063 Iustin Pop
    # checks ssh to any
415 a8083063 Iustin Pop
416 a8083063 Iustin Pop
    if 'filelist' not in node_result:
417 a8083063 Iustin Pop
      bad = True
418 a8083063 Iustin Pop
      feedback_fn("  - ERROR: node hasn't returned file checksum data")
419 a8083063 Iustin Pop
    else:
420 a8083063 Iustin Pop
      remote_cksum = node_result['filelist']
421 a8083063 Iustin Pop
      for file_name in file_list:
422 a8083063 Iustin Pop
        if file_name not in remote_cksum:
423 a8083063 Iustin Pop
          bad = True
424 a8083063 Iustin Pop
          feedback_fn("  - ERROR: file '%s' missing" % file_name)
425 a8083063 Iustin Pop
        elif remote_cksum[file_name] != local_cksum[file_name]:
426 a8083063 Iustin Pop
          bad = True
427 a8083063 Iustin Pop
          feedback_fn("  - ERROR: file '%s' has wrong checksum" % file_name)
428 a8083063 Iustin Pop
429 a8083063 Iustin Pop
    if 'nodelist' not in node_result:
430 a8083063 Iustin Pop
      bad = True
431 9d4bfc96 Iustin Pop
      feedback_fn("  - ERROR: node hasn't returned node ssh connectivity data")
432 a8083063 Iustin Pop
    else:
433 a8083063 Iustin Pop
      if node_result['nodelist']:
434 a8083063 Iustin Pop
        bad = True
435 a8083063 Iustin Pop
        for node in node_result['nodelist']:
436 9d4bfc96 Iustin Pop
          feedback_fn("  - ERROR: ssh communication with node '%s': %s" %
437 a8083063 Iustin Pop
                          (node, node_result['nodelist'][node]))
438 9d4bfc96 Iustin Pop
    if 'node-net-test' not in node_result:
439 9d4bfc96 Iustin Pop
      bad = True
440 9d4bfc96 Iustin Pop
      feedback_fn("  - ERROR: node hasn't returned node tcp connectivity data")
441 9d4bfc96 Iustin Pop
    else:
442 9d4bfc96 Iustin Pop
      if node_result['node-net-test']:
443 9d4bfc96 Iustin Pop
        bad = True
444 9d4bfc96 Iustin Pop
        nlist = utils.NiceSort(node_result['node-net-test'].keys())
445 9d4bfc96 Iustin Pop
        for node in nlist:
446 9d4bfc96 Iustin Pop
          feedback_fn("  - ERROR: tcp communication with node '%s': %s" %
447 9d4bfc96 Iustin Pop
                          (node, node_result['node-net-test'][node]))
448 9d4bfc96 Iustin Pop
449 a8083063 Iustin Pop
    hyp_result = node_result.get('hypervisor', None)
450 a8083063 Iustin Pop
    if hyp_result is not None:
451 a8083063 Iustin Pop
      feedback_fn("  - ERROR: hypervisor verify failure: '%s'" % hyp_result)
452 a8083063 Iustin Pop
    return bad
453 a8083063 Iustin Pop
454 c5705f58 Guido Trotter
  def _VerifyInstance(self, instance, instanceconfig, node_vol_is,
455 c5705f58 Guido Trotter
                      node_instance, feedback_fn):
456 a8083063 Iustin Pop
    """Verify an instance.
457 a8083063 Iustin Pop

458 a8083063 Iustin Pop
    This function checks to see if the required block devices are
459 a8083063 Iustin Pop
    available on the instance's node.
460 a8083063 Iustin Pop

461 a8083063 Iustin Pop
    """
462 a8083063 Iustin Pop
    bad = False
463 a8083063 Iustin Pop
464 a8083063 Iustin Pop
    node_current = instanceconfig.primary_node
465 a8083063 Iustin Pop
466 a8083063 Iustin Pop
    node_vol_should = {}
467 a8083063 Iustin Pop
    instanceconfig.MapLVsByNode(node_vol_should)
468 a8083063 Iustin Pop
469 a8083063 Iustin Pop
    for node in node_vol_should:
470 a8083063 Iustin Pop
      for volume in node_vol_should[node]:
471 a8083063 Iustin Pop
        if node not in node_vol_is or volume not in node_vol_is[node]:
472 a8083063 Iustin Pop
          feedback_fn("  - ERROR: volume %s missing on node %s" %
473 a8083063 Iustin Pop
                          (volume, node))
474 a8083063 Iustin Pop
          bad = True
475 a8083063 Iustin Pop
476 a8083063 Iustin Pop
    if not instanceconfig.status == 'down':
477 a872dae6 Guido Trotter
      if (node_current not in node_instance or
478 a872dae6 Guido Trotter
          not instance in node_instance[node_current]):
479 a8083063 Iustin Pop
        feedback_fn("  - ERROR: instance %s not running on node %s" %
480 a8083063 Iustin Pop
                        (instance, node_current))
481 a8083063 Iustin Pop
        bad = True
482 a8083063 Iustin Pop
483 a8083063 Iustin Pop
    for node in node_instance:
484 a8083063 Iustin Pop
      if (not node == node_current):
485 a8083063 Iustin Pop
        if instance in node_instance[node]:
486 a8083063 Iustin Pop
          feedback_fn("  - ERROR: instance %s should not run on node %s" %
487 a8083063 Iustin Pop
                          (instance, node))
488 a8083063 Iustin Pop
          bad = True
489 a8083063 Iustin Pop
490 6a438c98 Michael Hanselmann
    return bad
491 a8083063 Iustin Pop
492 a8083063 Iustin Pop
  def _VerifyOrphanVolumes(self, node_vol_should, node_vol_is, feedback_fn):
493 a8083063 Iustin Pop
    """Verify if there are any unknown volumes in the cluster.
494 a8083063 Iustin Pop

495 a8083063 Iustin Pop
    The .os, .swap and backup volumes are ignored. All other volumes are
496 a8083063 Iustin Pop
    reported as unknown.
497 a8083063 Iustin Pop

498 a8083063 Iustin Pop
    """
499 a8083063 Iustin Pop
    bad = False
500 a8083063 Iustin Pop
501 a8083063 Iustin Pop
    for node in node_vol_is:
502 a8083063 Iustin Pop
      for volume in node_vol_is[node]:
503 a8083063 Iustin Pop
        if node not in node_vol_should or volume not in node_vol_should[node]:
504 a8083063 Iustin Pop
          feedback_fn("  - ERROR: volume %s on node %s should not exist" %
505 a8083063 Iustin Pop
                      (volume, node))
506 a8083063 Iustin Pop
          bad = True
507 a8083063 Iustin Pop
    return bad
508 a8083063 Iustin Pop
509 a8083063 Iustin Pop
  def _VerifyOrphanInstances(self, instancelist, node_instance, feedback_fn):
510 a8083063 Iustin Pop
    """Verify the list of running instances.
511 a8083063 Iustin Pop

512 a8083063 Iustin Pop
    This checks what instances are running but unknown to the cluster.
513 a8083063 Iustin Pop

514 a8083063 Iustin Pop
    """
515 a8083063 Iustin Pop
    bad = False
516 a8083063 Iustin Pop
    for node in node_instance:
517 a8083063 Iustin Pop
      for runninginstance in node_instance[node]:
518 a8083063 Iustin Pop
        if runninginstance not in instancelist:
519 a8083063 Iustin Pop
          feedback_fn("  - ERROR: instance %s on node %s should not exist" %
520 a8083063 Iustin Pop
                          (runninginstance, node))
521 a8083063 Iustin Pop
          bad = True
522 a8083063 Iustin Pop
    return bad
523 a8083063 Iustin Pop
524 2b3b6ddd Guido Trotter
  def _VerifyNPlusOneMemory(self, node_info, instance_cfg, feedback_fn):
525 2b3b6ddd Guido Trotter
    """Verify N+1 Memory Resilience.
526 2b3b6ddd Guido Trotter

527 2b3b6ddd Guido Trotter
    Check that if one single node dies we can still start all the instances it
528 2b3b6ddd Guido Trotter
    was primary for.
529 2b3b6ddd Guido Trotter

530 2b3b6ddd Guido Trotter
    """
531 2b3b6ddd Guido Trotter
    bad = False
532 2b3b6ddd Guido Trotter
533 2b3b6ddd Guido Trotter
    for node, nodeinfo in node_info.iteritems():
534 2b3b6ddd Guido Trotter
      # This code checks that every node which is now listed as secondary has
535 2b3b6ddd Guido Trotter
      # enough memory to host all instances it is supposed to should a single
536 2b3b6ddd Guido Trotter
      # other node in the cluster fail.
537 2b3b6ddd Guido Trotter
      # FIXME: not ready for failover to an arbitrary node
538 2b3b6ddd Guido Trotter
      # FIXME: does not support file-backed instances
539 2b3b6ddd Guido Trotter
      # WARNING: we currently take into account down instances as well as up
540 2b3b6ddd Guido Trotter
      # ones, considering that even if they're down someone might want to start
541 2b3b6ddd Guido Trotter
      # them even in the event of a node failure.
542 2b3b6ddd Guido Trotter
      for prinode, instances in nodeinfo['sinst-by-pnode'].iteritems():
543 2b3b6ddd Guido Trotter
        needed_mem = 0
544 2b3b6ddd Guido Trotter
        for instance in instances:
545 2b3b6ddd Guido Trotter
          needed_mem += instance_cfg[instance].memory
546 2b3b6ddd Guido Trotter
        if nodeinfo['mfree'] < needed_mem:
547 2b3b6ddd Guido Trotter
          feedback_fn("  - ERROR: not enough memory on node %s to accomodate"
548 2b3b6ddd Guido Trotter
                      " failovers should node %s fail" % (node, prinode))
549 2b3b6ddd Guido Trotter
          bad = True
550 2b3b6ddd Guido Trotter
    return bad
551 2b3b6ddd Guido Trotter
552 a8083063 Iustin Pop
  def CheckPrereq(self):
553 a8083063 Iustin Pop
    """Check prerequisites.
554 a8083063 Iustin Pop

555 e54c4c5e Guido Trotter
    Transform the list of checks we're going to skip into a set and check that
556 e54c4c5e Guido Trotter
    all its members are valid.
557 a8083063 Iustin Pop

558 a8083063 Iustin Pop
    """
559 e54c4c5e Guido Trotter
    self.skip_set = frozenset(self.op.skip_checks)
560 e54c4c5e Guido Trotter
    if not constants.VERIFY_OPTIONAL_CHECKS.issuperset(self.skip_set):
561 e54c4c5e Guido Trotter
      raise errors.OpPrereqError("Invalid checks to be skipped specified")
562 a8083063 Iustin Pop
563 d8fff41c Guido Trotter
  def BuildHooksEnv(self):
564 d8fff41c Guido Trotter
    """Build hooks env.
565 d8fff41c Guido Trotter

566 d8fff41c Guido Trotter
    Cluster-Verify hooks just rone in the post phase and their failure makes
567 d8fff41c Guido Trotter
    the output be logged in the verify output and the verification to fail.
568 d8fff41c Guido Trotter

569 d8fff41c Guido Trotter
    """
570 d8fff41c Guido Trotter
    all_nodes = self.cfg.GetNodeList()
571 d8fff41c Guido Trotter
    # TODO: populate the environment with useful information for verify hooks
572 d8fff41c Guido Trotter
    env = {}
573 d8fff41c Guido Trotter
    return env, [], all_nodes
574 d8fff41c Guido Trotter
575 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
576 a8083063 Iustin Pop
    """Verify integrity of cluster, performing various test on nodes.
577 a8083063 Iustin Pop

578 a8083063 Iustin Pop
    """
579 a8083063 Iustin Pop
    bad = False
580 a8083063 Iustin Pop
    feedback_fn("* Verifying global settings")
581 8522ceeb Iustin Pop
    for msg in self.cfg.VerifyConfig():
582 8522ceeb Iustin Pop
      feedback_fn("  - ERROR: %s" % msg)
583 a8083063 Iustin Pop
584 a8083063 Iustin Pop
    vg_name = self.cfg.GetVGName()
585 a8083063 Iustin Pop
    nodelist = utils.NiceSort(self.cfg.GetNodeList())
586 9d4bfc96 Iustin Pop
    nodeinfo = [self.cfg.GetNodeInfo(nname) for nname in nodelist]
587 a8083063 Iustin Pop
    instancelist = utils.NiceSort(self.cfg.GetInstanceList())
588 93e4c50b Guido Trotter
    i_non_redundant = [] # Non redundant instances
589 a8083063 Iustin Pop
    node_volume = {}
590 a8083063 Iustin Pop
    node_instance = {}
591 9c9c7d30 Guido Trotter
    node_info = {}
592 26b6af5e Guido Trotter
    instance_cfg = {}
593 a8083063 Iustin Pop
594 a8083063 Iustin Pop
    # FIXME: verify OS list
595 a8083063 Iustin Pop
    # do local checksums
596 cb91d46e Iustin Pop
    file_names = list(self.sstore.GetFileList())
597 cb91d46e Iustin Pop
    file_names.append(constants.SSL_CERT_FILE)
598 cb91d46e Iustin Pop
    file_names.append(constants.CLUSTER_CONF_FILE)
599 a8083063 Iustin Pop
    local_checksums = utils.FingerprintFiles(file_names)
600 a8083063 Iustin Pop
601 a8083063 Iustin Pop
    feedback_fn("* Gathering data (%d nodes)" % len(nodelist))
602 a8083063 Iustin Pop
    all_volumeinfo = rpc.call_volume_list(nodelist, vg_name)
603 a8083063 Iustin Pop
    all_instanceinfo = rpc.call_instance_list(nodelist)
604 a8083063 Iustin Pop
    all_vglist = rpc.call_vg_list(nodelist)
605 a8083063 Iustin Pop
    node_verify_param = {
606 a8083063 Iustin Pop
      'filelist': file_names,
607 a8083063 Iustin Pop
      'nodelist': nodelist,
608 a8083063 Iustin Pop
      'hypervisor': None,
609 9d4bfc96 Iustin Pop
      'node-net-test': [(node.name, node.primary_ip, node.secondary_ip)
610 9d4bfc96 Iustin Pop
                        for node in nodeinfo]
611 a8083063 Iustin Pop
      }
612 a8083063 Iustin Pop
    all_nvinfo = rpc.call_node_verify(nodelist, node_verify_param)
613 a8083063 Iustin Pop
    all_rversion = rpc.call_version(nodelist)
614 9c9c7d30 Guido Trotter
    all_ninfo = rpc.call_node_info(nodelist, self.cfg.GetVGName())
615 a8083063 Iustin Pop
616 a8083063 Iustin Pop
    for node in nodelist:
617 a8083063 Iustin Pop
      feedback_fn("* Verifying node %s" % node)
618 a8083063 Iustin Pop
      result = self._VerifyNode(node, file_names, local_checksums,
619 a8083063 Iustin Pop
                                all_vglist[node], all_nvinfo[node],
620 a8083063 Iustin Pop
                                all_rversion[node], feedback_fn)
621 a8083063 Iustin Pop
      bad = bad or result
622 a8083063 Iustin Pop
623 a8083063 Iustin Pop
      # node_volume
624 a8083063 Iustin Pop
      volumeinfo = all_volumeinfo[node]
625 a8083063 Iustin Pop
626 b63ed789 Iustin Pop
      if isinstance(volumeinfo, basestring):
627 b63ed789 Iustin Pop
        feedback_fn("  - ERROR: LVM problem on node %s: %s" %
628 b63ed789 Iustin Pop
                    (node, volumeinfo[-400:].encode('string_escape')))
629 b63ed789 Iustin Pop
        bad = True
630 b63ed789 Iustin Pop
        node_volume[node] = {}
631 b63ed789 Iustin Pop
      elif not isinstance(volumeinfo, dict):
632 a8083063 Iustin Pop
        feedback_fn("  - ERROR: connection to %s failed" % (node,))
633 a8083063 Iustin Pop
        bad = True
634 a8083063 Iustin Pop
        continue
635 b63ed789 Iustin Pop
      else:
636 b63ed789 Iustin Pop
        node_volume[node] = volumeinfo
637 a8083063 Iustin Pop
638 a8083063 Iustin Pop
      # node_instance
639 a8083063 Iustin Pop
      nodeinstance = all_instanceinfo[node]
640 a8083063 Iustin Pop
      if type(nodeinstance) != list:
641 a8083063 Iustin Pop
        feedback_fn("  - ERROR: connection to %s failed" % (node,))
642 a8083063 Iustin Pop
        bad = True
643 a8083063 Iustin Pop
        continue
644 a8083063 Iustin Pop
645 a8083063 Iustin Pop
      node_instance[node] = nodeinstance
646 a8083063 Iustin Pop
647 9c9c7d30 Guido Trotter
      # node_info
648 9c9c7d30 Guido Trotter
      nodeinfo = all_ninfo[node]
649 9c9c7d30 Guido Trotter
      if not isinstance(nodeinfo, dict):
650 9c9c7d30 Guido Trotter
        feedback_fn("  - ERROR: connection to %s failed" % (node,))
651 9c9c7d30 Guido Trotter
        bad = True
652 9c9c7d30 Guido Trotter
        continue
653 9c9c7d30 Guido Trotter
654 9c9c7d30 Guido Trotter
      try:
655 9c9c7d30 Guido Trotter
        node_info[node] = {
656 9c9c7d30 Guido Trotter
          "mfree": int(nodeinfo['memory_free']),
657 9c9c7d30 Guido Trotter
          "dfree": int(nodeinfo['vg_free']),
658 93e4c50b Guido Trotter
          "pinst": [],
659 93e4c50b Guido Trotter
          "sinst": [],
660 36e7da50 Guido Trotter
          # dictionary holding all instances this node is secondary for,
661 36e7da50 Guido Trotter
          # grouped by their primary node. Each key is a cluster node, and each
662 36e7da50 Guido Trotter
          # value is a list of instances which have the key as primary and the
663 36e7da50 Guido Trotter
          # current node as secondary.  this is handy to calculate N+1 memory
664 36e7da50 Guido Trotter
          # availability if you can only failover from a primary to its
665 36e7da50 Guido Trotter
          # secondary.
666 36e7da50 Guido Trotter
          "sinst-by-pnode": {},
667 9c9c7d30 Guido Trotter
        }
668 9c9c7d30 Guido Trotter
      except ValueError:
669 9c9c7d30 Guido Trotter
        feedback_fn("  - ERROR: invalid value returned from node %s" % (node,))
670 9c9c7d30 Guido Trotter
        bad = True
671 9c9c7d30 Guido Trotter
        continue
672 9c9c7d30 Guido Trotter
673 a8083063 Iustin Pop
    node_vol_should = {}
674 a8083063 Iustin Pop
675 a8083063 Iustin Pop
    for instance in instancelist:
676 a8083063 Iustin Pop
      feedback_fn("* Verifying instance %s" % instance)
677 a8083063 Iustin Pop
      inst_config = self.cfg.GetInstanceInfo(instance)
678 c5705f58 Guido Trotter
      result =  self._VerifyInstance(instance, inst_config, node_volume,
679 c5705f58 Guido Trotter
                                     node_instance, feedback_fn)
680 c5705f58 Guido Trotter
      bad = bad or result
681 a8083063 Iustin Pop
682 a8083063 Iustin Pop
      inst_config.MapLVsByNode(node_vol_should)
683 a8083063 Iustin Pop
684 26b6af5e Guido Trotter
      instance_cfg[instance] = inst_config
685 26b6af5e Guido Trotter
686 93e4c50b Guido Trotter
      pnode = inst_config.primary_node
687 93e4c50b Guido Trotter
      if pnode in node_info:
688 93e4c50b Guido Trotter
        node_info[pnode]['pinst'].append(instance)
689 93e4c50b Guido Trotter
      else:
690 93e4c50b Guido Trotter
        feedback_fn("  - ERROR: instance %s, connection to primary node"
691 93e4c50b Guido Trotter
                    " %s failed" % (instance, pnode))
692 93e4c50b Guido Trotter
        bad = True
693 93e4c50b Guido Trotter
694 93e4c50b Guido Trotter
      # If the instance is non-redundant we cannot survive losing its primary
695 93e4c50b Guido Trotter
      # node, so we are not N+1 compliant. On the other hand we have no disk
696 93e4c50b Guido Trotter
      # templates with more than one secondary so that situation is not well
697 93e4c50b Guido Trotter
      # supported either.
698 93e4c50b Guido Trotter
      # FIXME: does not support file-backed instances
699 93e4c50b Guido Trotter
      if len(inst_config.secondary_nodes) == 0:
700 93e4c50b Guido Trotter
        i_non_redundant.append(instance)
701 93e4c50b Guido Trotter
      elif len(inst_config.secondary_nodes) > 1:
702 93e4c50b Guido Trotter
        feedback_fn("  - WARNING: multiple secondaries for instance %s"
703 93e4c50b Guido Trotter
                    % instance)
704 93e4c50b Guido Trotter
705 93e4c50b Guido Trotter
      for snode in inst_config.secondary_nodes:
706 93e4c50b Guido Trotter
        if snode in node_info:
707 93e4c50b Guido Trotter
          node_info[snode]['sinst'].append(instance)
708 36e7da50 Guido Trotter
          if pnode not in node_info[snode]['sinst-by-pnode']:
709 36e7da50 Guido Trotter
            node_info[snode]['sinst-by-pnode'][pnode] = []
710 36e7da50 Guido Trotter
          node_info[snode]['sinst-by-pnode'][pnode].append(instance)
711 93e4c50b Guido Trotter
        else:
712 93e4c50b Guido Trotter
          feedback_fn("  - ERROR: instance %s, connection to secondary node"
713 93e4c50b Guido Trotter
                      " %s failed" % (instance, snode))
714 93e4c50b Guido Trotter
715 a8083063 Iustin Pop
    feedback_fn("* Verifying orphan volumes")
716 a8083063 Iustin Pop
    result = self._VerifyOrphanVolumes(node_vol_should, node_volume,
717 a8083063 Iustin Pop
                                       feedback_fn)
718 a8083063 Iustin Pop
    bad = bad or result
719 a8083063 Iustin Pop
720 a8083063 Iustin Pop
    feedback_fn("* Verifying remaining instances")
721 a8083063 Iustin Pop
    result = self._VerifyOrphanInstances(instancelist, node_instance,
722 a8083063 Iustin Pop
                                         feedback_fn)
723 a8083063 Iustin Pop
    bad = bad or result
724 a8083063 Iustin Pop
725 e54c4c5e Guido Trotter
    if constants.VERIFY_NPLUSONE_MEM not in self.skip_set:
726 e54c4c5e Guido Trotter
      feedback_fn("* Verifying N+1 Memory redundancy")
727 e54c4c5e Guido Trotter
      result = self._VerifyNPlusOneMemory(node_info, instance_cfg, feedback_fn)
728 e54c4c5e Guido Trotter
      bad = bad or result
729 2b3b6ddd Guido Trotter
730 2b3b6ddd Guido Trotter
    feedback_fn("* Other Notes")
731 2b3b6ddd Guido Trotter
    if i_non_redundant:
732 2b3b6ddd Guido Trotter
      feedback_fn("  - NOTICE: %d non-redundant instance(s) found."
733 2b3b6ddd Guido Trotter
                  % len(i_non_redundant))
734 2b3b6ddd Guido Trotter
735 a8083063 Iustin Pop
    return int(bad)
736 a8083063 Iustin Pop
737 d8fff41c Guido Trotter
  def HooksCallBack(self, phase, hooks_results, feedback_fn, lu_result):
738 d8fff41c Guido Trotter
    """Analize the post-hooks' result, handle it, and send some
739 d8fff41c Guido Trotter
    nicely-formatted feedback back to the user.
740 d8fff41c Guido Trotter

741 d8fff41c Guido Trotter
    Args:
742 d8fff41c Guido Trotter
      phase: the hooks phase that has just been run
743 d8fff41c Guido Trotter
      hooks_results: the results of the multi-node hooks rpc call
744 d8fff41c Guido Trotter
      feedback_fn: function to send feedback back to the caller
745 d8fff41c Guido Trotter
      lu_result: previous Exec result
746 d8fff41c Guido Trotter

747 d8fff41c Guido Trotter
    """
748 d8fff41c Guido Trotter
    # We only really run POST phase hooks, and are only interested in their results
749 d8fff41c Guido Trotter
    if phase == constants.HOOKS_PHASE_POST:
750 d8fff41c Guido Trotter
      # Used to change hooks' output to proper indentation
751 d8fff41c Guido Trotter
      indent_re = re.compile('^', re.M)
752 d8fff41c Guido Trotter
      feedback_fn("* Hooks Results")
753 d8fff41c Guido Trotter
      if not hooks_results:
754 d8fff41c Guido Trotter
        feedback_fn("  - ERROR: general communication failure")
755 d8fff41c Guido Trotter
        lu_result = 1
756 d8fff41c Guido Trotter
      else:
757 d8fff41c Guido Trotter
        for node_name in hooks_results:
758 d8fff41c Guido Trotter
          show_node_header = True
759 d8fff41c Guido Trotter
          res = hooks_results[node_name]
760 d8fff41c Guido Trotter
          if res is False or not isinstance(res, list):
761 d8fff41c Guido Trotter
            feedback_fn("    Communication failure")
762 d8fff41c Guido Trotter
            lu_result = 1
763 d8fff41c Guido Trotter
            continue
764 d8fff41c Guido Trotter
          for script, hkr, output in res:
765 d8fff41c Guido Trotter
            if hkr == constants.HKR_FAIL:
766 d8fff41c Guido Trotter
              # The node header is only shown once, if there are
767 d8fff41c Guido Trotter
              # failing hooks on that node
768 d8fff41c Guido Trotter
              if show_node_header:
769 d8fff41c Guido Trotter
                feedback_fn("  Node %s:" % node_name)
770 d8fff41c Guido Trotter
                show_node_header = False
771 d8fff41c Guido Trotter
              feedback_fn("    ERROR: Script %s failed, output:" % script)
772 d8fff41c Guido Trotter
              output = indent_re.sub('      ', output)
773 d8fff41c Guido Trotter
              feedback_fn("%s" % output)
774 d8fff41c Guido Trotter
              lu_result = 1
775 d8fff41c Guido Trotter
776 d8fff41c Guido Trotter
      return lu_result
777 d8fff41c Guido Trotter
778 a8083063 Iustin Pop
779 2c95a8d4 Iustin Pop
class LUVerifyDisks(NoHooksLU):
780 2c95a8d4 Iustin Pop
  """Verifies the cluster disks status.
781 2c95a8d4 Iustin Pop

782 2c95a8d4 Iustin Pop
  """
783 2c95a8d4 Iustin Pop
  _OP_REQP = []
784 2c95a8d4 Iustin Pop
785 2c95a8d4 Iustin Pop
  def CheckPrereq(self):
786 2c95a8d4 Iustin Pop
    """Check prerequisites.
787 2c95a8d4 Iustin Pop

788 2c95a8d4 Iustin Pop
    This has no prerequisites.
789 2c95a8d4 Iustin Pop

790 2c95a8d4 Iustin Pop
    """
791 2c95a8d4 Iustin Pop
    pass
792 2c95a8d4 Iustin Pop
793 2c95a8d4 Iustin Pop
  def Exec(self, feedback_fn):
794 2c95a8d4 Iustin Pop
    """Verify integrity of cluster disks.
795 2c95a8d4 Iustin Pop

796 2c95a8d4 Iustin Pop
    """
797 b63ed789 Iustin Pop
    result = res_nodes, res_nlvm, res_instances, res_missing = [], {}, [], {}
798 2c95a8d4 Iustin Pop
799 2c95a8d4 Iustin Pop
    vg_name = self.cfg.GetVGName()
800 2c95a8d4 Iustin Pop
    nodes = utils.NiceSort(self.cfg.GetNodeList())
801 2c95a8d4 Iustin Pop
    instances = [self.cfg.GetInstanceInfo(name)
802 2c95a8d4 Iustin Pop
                 for name in self.cfg.GetInstanceList()]
803 2c95a8d4 Iustin Pop
804 2c95a8d4 Iustin Pop
    nv_dict = {}
805 2c95a8d4 Iustin Pop
    for inst in instances:
806 2c95a8d4 Iustin Pop
      inst_lvs = {}
807 2c95a8d4 Iustin Pop
      if (inst.status != "up" or
808 2c95a8d4 Iustin Pop
          inst.disk_template not in constants.DTS_NET_MIRROR):
809 2c95a8d4 Iustin Pop
        continue
810 2c95a8d4 Iustin Pop
      inst.MapLVsByNode(inst_lvs)
811 2c95a8d4 Iustin Pop
      # transform { iname: {node: [vol,],},} to {(node, vol): iname}
812 2c95a8d4 Iustin Pop
      for node, vol_list in inst_lvs.iteritems():
813 2c95a8d4 Iustin Pop
        for vol in vol_list:
814 2c95a8d4 Iustin Pop
          nv_dict[(node, vol)] = inst
815 2c95a8d4 Iustin Pop
816 2c95a8d4 Iustin Pop
    if not nv_dict:
817 2c95a8d4 Iustin Pop
      return result
818 2c95a8d4 Iustin Pop
819 2c95a8d4 Iustin Pop
    node_lvs = rpc.call_volume_list(nodes, vg_name)
820 2c95a8d4 Iustin Pop
821 2c95a8d4 Iustin Pop
    to_act = set()
822 2c95a8d4 Iustin Pop
    for node in nodes:
823 2c95a8d4 Iustin Pop
      # node_volume
824 2c95a8d4 Iustin Pop
      lvs = node_lvs[node]
825 2c95a8d4 Iustin Pop
826 b63ed789 Iustin Pop
      if isinstance(lvs, basestring):
827 b63ed789 Iustin Pop
        logger.Info("error enumerating LVs on node %s: %s" % (node, lvs))
828 b63ed789 Iustin Pop
        res_nlvm[node] = lvs
829 b63ed789 Iustin Pop
      elif not isinstance(lvs, dict):
830 2c95a8d4 Iustin Pop
        logger.Info("connection to node %s failed or invalid data returned" %
831 2c95a8d4 Iustin Pop
                    (node,))
832 2c95a8d4 Iustin Pop
        res_nodes.append(node)
833 2c95a8d4 Iustin Pop
        continue
834 2c95a8d4 Iustin Pop
835 2c95a8d4 Iustin Pop
      for lv_name, (_, lv_inactive, lv_online) in lvs.iteritems():
836 b63ed789 Iustin Pop
        inst = nv_dict.pop((node, lv_name), None)
837 b63ed789 Iustin Pop
        if (not lv_online and inst is not None
838 b63ed789 Iustin Pop
            and inst.name not in res_instances):
839 b08d5a87 Iustin Pop
          res_instances.append(inst.name)
840 2c95a8d4 Iustin Pop
841 b63ed789 Iustin Pop
    # any leftover items in nv_dict are missing LVs, let's arrange the
842 b63ed789 Iustin Pop
    # data better
843 b63ed789 Iustin Pop
    for key, inst in nv_dict.iteritems():
844 b63ed789 Iustin Pop
      if inst.name not in res_missing:
845 b63ed789 Iustin Pop
        res_missing[inst.name] = []
846 b63ed789 Iustin Pop
      res_missing[inst.name].append(key)
847 b63ed789 Iustin Pop
848 2c95a8d4 Iustin Pop
    return result
849 2c95a8d4 Iustin Pop
850 2c95a8d4 Iustin Pop
851 07bd8a51 Iustin Pop
class LURenameCluster(LogicalUnit):
852 07bd8a51 Iustin Pop
  """Rename the cluster.
853 07bd8a51 Iustin Pop

854 07bd8a51 Iustin Pop
  """
855 07bd8a51 Iustin Pop
  HPATH = "cluster-rename"
856 07bd8a51 Iustin Pop
  HTYPE = constants.HTYPE_CLUSTER
857 07bd8a51 Iustin Pop
  _OP_REQP = ["name"]
858 05f86716 Guido Trotter
  REQ_WSSTORE = True
859 07bd8a51 Iustin Pop
860 07bd8a51 Iustin Pop
  def BuildHooksEnv(self):
861 07bd8a51 Iustin Pop
    """Build hooks env.
862 07bd8a51 Iustin Pop

863 07bd8a51 Iustin Pop
    """
864 07bd8a51 Iustin Pop
    env = {
865 488b540d Iustin Pop
      "OP_TARGET": self.sstore.GetClusterName(),
866 07bd8a51 Iustin Pop
      "NEW_NAME": self.op.name,
867 07bd8a51 Iustin Pop
      }
868 07bd8a51 Iustin Pop
    mn = self.sstore.GetMasterNode()
869 07bd8a51 Iustin Pop
    return env, [mn], [mn]
870 07bd8a51 Iustin Pop
871 07bd8a51 Iustin Pop
  def CheckPrereq(self):
872 07bd8a51 Iustin Pop
    """Verify that the passed name is a valid one.
873 07bd8a51 Iustin Pop

874 07bd8a51 Iustin Pop
    """
875 89e1fc26 Iustin Pop
    hostname = utils.HostInfo(self.op.name)
876 07bd8a51 Iustin Pop
877 bcf043c9 Iustin Pop
    new_name = hostname.name
878 bcf043c9 Iustin Pop
    self.ip = new_ip = hostname.ip
879 07bd8a51 Iustin Pop
    old_name = self.sstore.GetClusterName()
880 07bd8a51 Iustin Pop
    old_ip = self.sstore.GetMasterIP()
881 07bd8a51 Iustin Pop
    if new_name == old_name and new_ip == old_ip:
882 07bd8a51 Iustin Pop
      raise errors.OpPrereqError("Neither the name nor the IP address of the"
883 07bd8a51 Iustin Pop
                                 " cluster has changed")
884 07bd8a51 Iustin Pop
    if new_ip != old_ip:
885 937f983d Guido Trotter
      if utils.TcpPing(new_ip, constants.DEFAULT_NODED_PORT):
886 07bd8a51 Iustin Pop
        raise errors.OpPrereqError("The given cluster IP address (%s) is"
887 07bd8a51 Iustin Pop
                                   " reachable on the network. Aborting." %
888 07bd8a51 Iustin Pop
                                   new_ip)
889 07bd8a51 Iustin Pop
890 07bd8a51 Iustin Pop
    self.op.name = new_name
891 07bd8a51 Iustin Pop
892 07bd8a51 Iustin Pop
  def Exec(self, feedback_fn):
893 07bd8a51 Iustin Pop
    """Rename the cluster.
894 07bd8a51 Iustin Pop

895 07bd8a51 Iustin Pop
    """
896 07bd8a51 Iustin Pop
    clustername = self.op.name
897 07bd8a51 Iustin Pop
    ip = self.ip
898 07bd8a51 Iustin Pop
    ss = self.sstore
899 07bd8a51 Iustin Pop
900 07bd8a51 Iustin Pop
    # shutdown the master IP
901 07bd8a51 Iustin Pop
    master = ss.GetMasterNode()
902 07bd8a51 Iustin Pop
    if not rpc.call_node_stop_master(master):
903 07bd8a51 Iustin Pop
      raise errors.OpExecError("Could not disable the master role")
904 07bd8a51 Iustin Pop
905 07bd8a51 Iustin Pop
    try:
906 07bd8a51 Iustin Pop
      # modify the sstore
907 07bd8a51 Iustin Pop
      ss.SetKey(ss.SS_MASTER_IP, ip)
908 07bd8a51 Iustin Pop
      ss.SetKey(ss.SS_CLUSTER_NAME, clustername)
909 07bd8a51 Iustin Pop
910 07bd8a51 Iustin Pop
      # Distribute updated ss config to all nodes
911 07bd8a51 Iustin Pop
      myself = self.cfg.GetNodeInfo(master)
912 07bd8a51 Iustin Pop
      dist_nodes = self.cfg.GetNodeList()
913 07bd8a51 Iustin Pop
      if myself.name in dist_nodes:
914 07bd8a51 Iustin Pop
        dist_nodes.remove(myself.name)
915 07bd8a51 Iustin Pop
916 07bd8a51 Iustin Pop
      logger.Debug("Copying updated ssconf data to all nodes")
917 07bd8a51 Iustin Pop
      for keyname in [ss.SS_CLUSTER_NAME, ss.SS_MASTER_IP]:
918 07bd8a51 Iustin Pop
        fname = ss.KeyToFilename(keyname)
919 07bd8a51 Iustin Pop
        result = rpc.call_upload_file(dist_nodes, fname)
920 07bd8a51 Iustin Pop
        for to_node in dist_nodes:
921 07bd8a51 Iustin Pop
          if not result[to_node]:
922 07bd8a51 Iustin Pop
            logger.Error("copy of file %s to node %s failed" %
923 07bd8a51 Iustin Pop
                         (fname, to_node))
924 07bd8a51 Iustin Pop
    finally:
925 07bd8a51 Iustin Pop
      if not rpc.call_node_start_master(master):
926 f4bc1f2c Michael Hanselmann
        logger.Error("Could not re-enable the master role on the master,"
927 f4bc1f2c Michael Hanselmann
                     " please restart manually.")
928 07bd8a51 Iustin Pop
929 07bd8a51 Iustin Pop
930 8084f9f6 Manuel Franceschini
def _RecursiveCheckIfLVMBased(disk):
931 8084f9f6 Manuel Franceschini
  """Check if the given disk or its children are lvm-based.
932 8084f9f6 Manuel Franceschini

933 8084f9f6 Manuel Franceschini
  Args:
934 8084f9f6 Manuel Franceschini
    disk: ganeti.objects.Disk object
935 8084f9f6 Manuel Franceschini

936 8084f9f6 Manuel Franceschini
  Returns:
937 8084f9f6 Manuel Franceschini
    boolean indicating whether a LD_LV dev_type was found or not
938 8084f9f6 Manuel Franceschini

939 8084f9f6 Manuel Franceschini
  """
940 8084f9f6 Manuel Franceschini
  if disk.children:
941 8084f9f6 Manuel Franceschini
    for chdisk in disk.children:
942 8084f9f6 Manuel Franceschini
      if _RecursiveCheckIfLVMBased(chdisk):
943 8084f9f6 Manuel Franceschini
        return True
944 8084f9f6 Manuel Franceschini
  return disk.dev_type == constants.LD_LV
945 8084f9f6 Manuel Franceschini
946 8084f9f6 Manuel Franceschini
947 8084f9f6 Manuel Franceschini
class LUSetClusterParams(LogicalUnit):
948 8084f9f6 Manuel Franceschini
  """Change the parameters of the cluster.
949 8084f9f6 Manuel Franceschini

950 8084f9f6 Manuel Franceschini
  """
951 8084f9f6 Manuel Franceschini
  HPATH = "cluster-modify"
952 8084f9f6 Manuel Franceschini
  HTYPE = constants.HTYPE_CLUSTER
953 8084f9f6 Manuel Franceschini
  _OP_REQP = []
954 8084f9f6 Manuel Franceschini
955 8084f9f6 Manuel Franceschini
  def BuildHooksEnv(self):
956 8084f9f6 Manuel Franceschini
    """Build hooks env.
957 8084f9f6 Manuel Franceschini

958 8084f9f6 Manuel Franceschini
    """
959 8084f9f6 Manuel Franceschini
    env = {
960 8084f9f6 Manuel Franceschini
      "OP_TARGET": self.sstore.GetClusterName(),
961 8084f9f6 Manuel Franceschini
      "NEW_VG_NAME": self.op.vg_name,
962 8084f9f6 Manuel Franceschini
      }
963 8084f9f6 Manuel Franceschini
    mn = self.sstore.GetMasterNode()
964 8084f9f6 Manuel Franceschini
    return env, [mn], [mn]
965 8084f9f6 Manuel Franceschini
966 8084f9f6 Manuel Franceschini
  def CheckPrereq(self):
967 8084f9f6 Manuel Franceschini
    """Check prerequisites.
968 8084f9f6 Manuel Franceschini

969 8084f9f6 Manuel Franceschini
    This checks whether the given params don't conflict and
970 5f83e263 Iustin Pop
    if the given volume group is valid.
971 8084f9f6 Manuel Franceschini

972 8084f9f6 Manuel Franceschini
    """
973 8084f9f6 Manuel Franceschini
    if not self.op.vg_name:
974 8084f9f6 Manuel Franceschini
      instances = [self.cfg.GetInstanceInfo(name)
975 8084f9f6 Manuel Franceschini
                   for name in self.cfg.GetInstanceList()]
976 8084f9f6 Manuel Franceschini
      for inst in instances:
977 8084f9f6 Manuel Franceschini
        for disk in inst.disks:
978 8084f9f6 Manuel Franceschini
          if _RecursiveCheckIfLVMBased(disk):
979 8084f9f6 Manuel Franceschini
            raise errors.OpPrereqError("Cannot disable lvm storage while"
980 8084f9f6 Manuel Franceschini
                                       " lvm-based instances exist")
981 8084f9f6 Manuel Franceschini
982 8084f9f6 Manuel Franceschini
    # if vg_name not None, checks given volume group on all nodes
983 8084f9f6 Manuel Franceschini
    if self.op.vg_name:
984 8084f9f6 Manuel Franceschini
      node_list = self.cfg.GetNodeList()
985 8084f9f6 Manuel Franceschini
      vglist = rpc.call_vg_list(node_list)
986 8084f9f6 Manuel Franceschini
      for node in node_list:
987 8d1a2a64 Michael Hanselmann
        vgstatus = utils.CheckVolumeGroupSize(vglist[node], self.op.vg_name,
988 8d1a2a64 Michael Hanselmann
                                              constants.MIN_VG_SIZE)
989 8084f9f6 Manuel Franceschini
        if vgstatus:
990 8084f9f6 Manuel Franceschini
          raise errors.OpPrereqError("Error on node '%s': %s" %
991 8084f9f6 Manuel Franceschini
                                     (node, vgstatus))
992 8084f9f6 Manuel Franceschini
993 8084f9f6 Manuel Franceschini
  def Exec(self, feedback_fn):
994 8084f9f6 Manuel Franceschini
    """Change the parameters of the cluster.
995 8084f9f6 Manuel Franceschini

996 8084f9f6 Manuel Franceschini
    """
997 8084f9f6 Manuel Franceschini
    if self.op.vg_name != self.cfg.GetVGName():
998 8084f9f6 Manuel Franceschini
      self.cfg.SetVGName(self.op.vg_name)
999 8084f9f6 Manuel Franceschini
    else:
1000 8084f9f6 Manuel Franceschini
      feedback_fn("Cluster LVM configuration already in desired"
1001 8084f9f6 Manuel Franceschini
                  " state, not changing")
1002 8084f9f6 Manuel Franceschini
1003 8084f9f6 Manuel Franceschini
1004 5bfac263 Iustin Pop
def _WaitForSync(cfgw, instance, proc, oneshot=False, unlock=False):
1005 a8083063 Iustin Pop
  """Sleep and poll for an instance's disk to sync.
1006 a8083063 Iustin Pop

1007 a8083063 Iustin Pop
  """
1008 a8083063 Iustin Pop
  if not instance.disks:
1009 a8083063 Iustin Pop
    return True
1010 a8083063 Iustin Pop
1011 a8083063 Iustin Pop
  if not oneshot:
1012 5bfac263 Iustin Pop
    proc.LogInfo("Waiting for instance %s to sync disks." % instance.name)
1013 a8083063 Iustin Pop
1014 a8083063 Iustin Pop
  node = instance.primary_node
1015 a8083063 Iustin Pop
1016 a8083063 Iustin Pop
  for dev in instance.disks:
1017 a8083063 Iustin Pop
    cfgw.SetDiskID(dev, node)
1018 a8083063 Iustin Pop
1019 a8083063 Iustin Pop
  retries = 0
1020 a8083063 Iustin Pop
  while True:
1021 a8083063 Iustin Pop
    max_time = 0
1022 a8083063 Iustin Pop
    done = True
1023 a8083063 Iustin Pop
    cumul_degraded = False
1024 a8083063 Iustin Pop
    rstats = rpc.call_blockdev_getmirrorstatus(node, instance.disks)
1025 a8083063 Iustin Pop
    if not rstats:
1026 5bfac263 Iustin Pop
      proc.LogWarning("Can't get any data from node %s" % node)
1027 a8083063 Iustin Pop
      retries += 1
1028 a8083063 Iustin Pop
      if retries >= 10:
1029 3ecf6786 Iustin Pop
        raise errors.RemoteError("Can't contact node %s for mirror data,"
1030 3ecf6786 Iustin Pop
                                 " aborting." % node)
1031 a8083063 Iustin Pop
      time.sleep(6)
1032 a8083063 Iustin Pop
      continue
1033 a8083063 Iustin Pop
    retries = 0
1034 a8083063 Iustin Pop
    for i in range(len(rstats)):
1035 a8083063 Iustin Pop
      mstat = rstats[i]
1036 a8083063 Iustin Pop
      if mstat is None:
1037 5bfac263 Iustin Pop
        proc.LogWarning("Can't compute data for node %s/%s" %
1038 a8083063 Iustin Pop
                        (node, instance.disks[i].iv_name))
1039 a8083063 Iustin Pop
        continue
1040 0834c866 Iustin Pop
      # we ignore the ldisk parameter
1041 0834c866 Iustin Pop
      perc_done, est_time, is_degraded, _ = mstat
1042 a8083063 Iustin Pop
      cumul_degraded = cumul_degraded or (is_degraded and perc_done is None)
1043 a8083063 Iustin Pop
      if perc_done is not None:
1044 a8083063 Iustin Pop
        done = False
1045 a8083063 Iustin Pop
        if est_time is not None:
1046 a8083063 Iustin Pop
          rem_time = "%d estimated seconds remaining" % est_time
1047 a8083063 Iustin Pop
          max_time = est_time
1048 a8083063 Iustin Pop
        else:
1049 a8083063 Iustin Pop
          rem_time = "no time estimate"
1050 5bfac263 Iustin Pop
        proc.LogInfo("- device %s: %5.2f%% done, %s" %
1051 5bfac263 Iustin Pop
                     (instance.disks[i].iv_name, perc_done, rem_time))
1052 a8083063 Iustin Pop
    if done or oneshot:
1053 a8083063 Iustin Pop
      break
1054 a8083063 Iustin Pop
1055 a8083063 Iustin Pop
    if unlock:
1056 685ee993 Iustin Pop
      #utils.Unlock('cmd')
1057 685ee993 Iustin Pop
      pass
1058 a8083063 Iustin Pop
    try:
1059 a8083063 Iustin Pop
      time.sleep(min(60, max_time))
1060 a8083063 Iustin Pop
    finally:
1061 a8083063 Iustin Pop
      if unlock:
1062 685ee993 Iustin Pop
        #utils.Lock('cmd')
1063 685ee993 Iustin Pop
        pass
1064 a8083063 Iustin Pop
1065 a8083063 Iustin Pop
  if done:
1066 5bfac263 Iustin Pop
    proc.LogInfo("Instance %s's disks are in sync." % instance.name)
1067 a8083063 Iustin Pop
  return not cumul_degraded
1068 a8083063 Iustin Pop
1069 a8083063 Iustin Pop
1070 0834c866 Iustin Pop
def _CheckDiskConsistency(cfgw, dev, node, on_primary, ldisk=False):
1071 a8083063 Iustin Pop
  """Check that mirrors are not degraded.
1072 a8083063 Iustin Pop

1073 0834c866 Iustin Pop
  The ldisk parameter, if True, will change the test from the
1074 0834c866 Iustin Pop
  is_degraded attribute (which represents overall non-ok status for
1075 0834c866 Iustin Pop
  the device(s)) to the ldisk (representing the local storage status).
1076 0834c866 Iustin Pop

1077 a8083063 Iustin Pop
  """
1078 a8083063 Iustin Pop
  cfgw.SetDiskID(dev, node)
1079 0834c866 Iustin Pop
  if ldisk:
1080 0834c866 Iustin Pop
    idx = 6
1081 0834c866 Iustin Pop
  else:
1082 0834c866 Iustin Pop
    idx = 5
1083 a8083063 Iustin Pop
1084 a8083063 Iustin Pop
  result = True
1085 a8083063 Iustin Pop
  if on_primary or dev.AssembleOnSecondary():
1086 a8083063 Iustin Pop
    rstats = rpc.call_blockdev_find(node, dev)
1087 a8083063 Iustin Pop
    if not rstats:
1088 aa9d0c32 Guido Trotter
      logger.ToStderr("Node %s: Disk degraded, not found or node down" % node)
1089 a8083063 Iustin Pop
      result = False
1090 a8083063 Iustin Pop
    else:
1091 0834c866 Iustin Pop
      result = result and (not rstats[idx])
1092 a8083063 Iustin Pop
  if dev.children:
1093 a8083063 Iustin Pop
    for child in dev.children:
1094 a8083063 Iustin Pop
      result = result and _CheckDiskConsistency(cfgw, child, node, on_primary)
1095 a8083063 Iustin Pop
1096 a8083063 Iustin Pop
  return result
1097 a8083063 Iustin Pop
1098 a8083063 Iustin Pop
1099 a8083063 Iustin Pop
class LUDiagnoseOS(NoHooksLU):
1100 a8083063 Iustin Pop
  """Logical unit for OS diagnose/query.
1101 a8083063 Iustin Pop

1102 a8083063 Iustin Pop
  """
1103 1f9430d6 Iustin Pop
  _OP_REQP = ["output_fields", "names"]
1104 a8083063 Iustin Pop
1105 a8083063 Iustin Pop
  def CheckPrereq(self):
1106 a8083063 Iustin Pop
    """Check prerequisites.
1107 a8083063 Iustin Pop

1108 a8083063 Iustin Pop
    This always succeeds, since this is a pure query LU.
1109 a8083063 Iustin Pop

1110 a8083063 Iustin Pop
    """
1111 1f9430d6 Iustin Pop
    if self.op.names:
1112 1f9430d6 Iustin Pop
      raise errors.OpPrereqError("Selective OS query not supported")
1113 1f9430d6 Iustin Pop
1114 1f9430d6 Iustin Pop
    self.dynamic_fields = frozenset(["name", "valid", "node_status"])
1115 1f9430d6 Iustin Pop
    _CheckOutputFields(static=[],
1116 1f9430d6 Iustin Pop
                       dynamic=self.dynamic_fields,
1117 1f9430d6 Iustin Pop
                       selected=self.op.output_fields)
1118 1f9430d6 Iustin Pop
1119 1f9430d6 Iustin Pop
  @staticmethod
1120 1f9430d6 Iustin Pop
  def _DiagnoseByOS(node_list, rlist):
1121 1f9430d6 Iustin Pop
    """Remaps a per-node return list into an a per-os per-node dictionary
1122 1f9430d6 Iustin Pop

1123 1f9430d6 Iustin Pop
      Args:
1124 1f9430d6 Iustin Pop
        node_list: a list with the names of all nodes
1125 1f9430d6 Iustin Pop
        rlist: a map with node names as keys and OS objects as values
1126 1f9430d6 Iustin Pop

1127 1f9430d6 Iustin Pop
      Returns:
1128 1f9430d6 Iustin Pop
        map: a map with osnames as keys and as value another map, with
1129 1f9430d6 Iustin Pop
             nodes as
1130 1f9430d6 Iustin Pop
             keys and list of OS objects as values
1131 1f9430d6 Iustin Pop
             e.g. {"debian-etch": {"node1": [<object>,...],
1132 1f9430d6 Iustin Pop
                                   "node2": [<object>,]}
1133 1f9430d6 Iustin Pop
                  }
1134 1f9430d6 Iustin Pop

1135 1f9430d6 Iustin Pop
    """
1136 1f9430d6 Iustin Pop
    all_os = {}
1137 1f9430d6 Iustin Pop
    for node_name, nr in rlist.iteritems():
1138 1f9430d6 Iustin Pop
      if not nr:
1139 1f9430d6 Iustin Pop
        continue
1140 b4de68a9 Iustin Pop
      for os_obj in nr:
1141 b4de68a9 Iustin Pop
        if os_obj.name not in all_os:
1142 1f9430d6 Iustin Pop
          # build a list of nodes for this os containing empty lists
1143 1f9430d6 Iustin Pop
          # for each node in node_list
1144 b4de68a9 Iustin Pop
          all_os[os_obj.name] = {}
1145 1f9430d6 Iustin Pop
          for nname in node_list:
1146 b4de68a9 Iustin Pop
            all_os[os_obj.name][nname] = []
1147 b4de68a9 Iustin Pop
        all_os[os_obj.name][node_name].append(os_obj)
1148 1f9430d6 Iustin Pop
    return all_os
1149 a8083063 Iustin Pop
1150 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
1151 a8083063 Iustin Pop
    """Compute the list of OSes.
1152 a8083063 Iustin Pop

1153 a8083063 Iustin Pop
    """
1154 a8083063 Iustin Pop
    node_list = self.cfg.GetNodeList()
1155 a8083063 Iustin Pop
    node_data = rpc.call_os_diagnose(node_list)
1156 a8083063 Iustin Pop
    if node_data == False:
1157 3ecf6786 Iustin Pop
      raise errors.OpExecError("Can't gather the list of OSes")
1158 1f9430d6 Iustin Pop
    pol = self._DiagnoseByOS(node_list, node_data)
1159 1f9430d6 Iustin Pop
    output = []
1160 1f9430d6 Iustin Pop
    for os_name, os_data in pol.iteritems():
1161 1f9430d6 Iustin Pop
      row = []
1162 1f9430d6 Iustin Pop
      for field in self.op.output_fields:
1163 1f9430d6 Iustin Pop
        if field == "name":
1164 1f9430d6 Iustin Pop
          val = os_name
1165 1f9430d6 Iustin Pop
        elif field == "valid":
1166 1f9430d6 Iustin Pop
          val = utils.all([osl and osl[0] for osl in os_data.values()])
1167 1f9430d6 Iustin Pop
        elif field == "node_status":
1168 1f9430d6 Iustin Pop
          val = {}
1169 1f9430d6 Iustin Pop
          for node_name, nos_list in os_data.iteritems():
1170 1f9430d6 Iustin Pop
            val[node_name] = [(v.status, v.path) for v in nos_list]
1171 1f9430d6 Iustin Pop
        else:
1172 1f9430d6 Iustin Pop
          raise errors.ParameterError(field)
1173 1f9430d6 Iustin Pop
        row.append(val)
1174 1f9430d6 Iustin Pop
      output.append(row)
1175 1f9430d6 Iustin Pop
1176 1f9430d6 Iustin Pop
    return output
1177 a8083063 Iustin Pop
1178 a8083063 Iustin Pop
1179 a8083063 Iustin Pop
class LURemoveNode(LogicalUnit):
1180 a8083063 Iustin Pop
  """Logical unit for removing a node.
1181 a8083063 Iustin Pop

1182 a8083063 Iustin Pop
  """
1183 a8083063 Iustin Pop
  HPATH = "node-remove"
1184 a8083063 Iustin Pop
  HTYPE = constants.HTYPE_NODE
1185 a8083063 Iustin Pop
  _OP_REQP = ["node_name"]
1186 a8083063 Iustin Pop
1187 a8083063 Iustin Pop
  def BuildHooksEnv(self):
1188 a8083063 Iustin Pop
    """Build hooks env.
1189 a8083063 Iustin Pop

1190 a8083063 Iustin Pop
    This doesn't run on the target node in the pre phase as a failed
1191 d08869ee Guido Trotter
    node would then be impossible to remove.
1192 a8083063 Iustin Pop

1193 a8083063 Iustin Pop
    """
1194 396e1b78 Michael Hanselmann
    env = {
1195 0e137c28 Iustin Pop
      "OP_TARGET": self.op.node_name,
1196 396e1b78 Michael Hanselmann
      "NODE_NAME": self.op.node_name,
1197 396e1b78 Michael Hanselmann
      }
1198 a8083063 Iustin Pop
    all_nodes = self.cfg.GetNodeList()
1199 a8083063 Iustin Pop
    all_nodes.remove(self.op.node_name)
1200 396e1b78 Michael Hanselmann
    return env, all_nodes, all_nodes
1201 a8083063 Iustin Pop
1202 a8083063 Iustin Pop
  def CheckPrereq(self):
1203 a8083063 Iustin Pop
    """Check prerequisites.
1204 a8083063 Iustin Pop

1205 a8083063 Iustin Pop
    This checks:
1206 a8083063 Iustin Pop
     - the node exists in the configuration
1207 a8083063 Iustin Pop
     - it does not have primary or secondary instances
1208 a8083063 Iustin Pop
     - it's not the master
1209 a8083063 Iustin Pop

1210 a8083063 Iustin Pop
    Any errors are signalled by raising errors.OpPrereqError.
1211 a8083063 Iustin Pop

1212 a8083063 Iustin Pop
    """
1213 a8083063 Iustin Pop
    node = self.cfg.GetNodeInfo(self.cfg.ExpandNodeName(self.op.node_name))
1214 a8083063 Iustin Pop
    if node is None:
1215 a02bc76e Iustin Pop
      raise errors.OpPrereqError, ("Node '%s' is unknown." % self.op.node_name)
1216 a8083063 Iustin Pop
1217 a8083063 Iustin Pop
    instance_list = self.cfg.GetInstanceList()
1218 a8083063 Iustin Pop
1219 880478f8 Iustin Pop
    masternode = self.sstore.GetMasterNode()
1220 a8083063 Iustin Pop
    if node.name == masternode:
1221 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("Node is the master node,"
1222 3ecf6786 Iustin Pop
                                 " you need to failover first.")
1223 a8083063 Iustin Pop
1224 a8083063 Iustin Pop
    for instance_name in instance_list:
1225 a8083063 Iustin Pop
      instance = self.cfg.GetInstanceInfo(instance_name)
1226 a8083063 Iustin Pop
      if node.name == instance.primary_node:
1227 3ecf6786 Iustin Pop
        raise errors.OpPrereqError("Instance %s still running on the node,"
1228 3ecf6786 Iustin Pop
                                   " please remove first." % instance_name)
1229 a8083063 Iustin Pop
      if node.name in instance.secondary_nodes:
1230 3ecf6786 Iustin Pop
        raise errors.OpPrereqError("Instance %s has node as a secondary,"
1231 3ecf6786 Iustin Pop
                                   " please remove first." % instance_name)
1232 a8083063 Iustin Pop
    self.op.node_name = node.name
1233 a8083063 Iustin Pop
    self.node = node
1234 a8083063 Iustin Pop
1235 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
1236 a8083063 Iustin Pop
    """Removes the node from the cluster.
1237 a8083063 Iustin Pop

1238 a8083063 Iustin Pop
    """
1239 a8083063 Iustin Pop
    node = self.node
1240 a8083063 Iustin Pop
    logger.Info("stopping the node daemon and removing configs from node %s" %
1241 a8083063 Iustin Pop
                node.name)
1242 a8083063 Iustin Pop
1243 a8083063 Iustin Pop
    rpc.call_node_leave_cluster(node.name)
1244 a8083063 Iustin Pop
1245 a8083063 Iustin Pop
    logger.Info("Removing node %s from config" % node.name)
1246 a8083063 Iustin Pop
1247 a8083063 Iustin Pop
    self.cfg.RemoveNode(node.name)
1248 a8083063 Iustin Pop
1249 d9c02ca6 Michael Hanselmann
    utils.RemoveHostFromEtcHosts(node.name)
1250 c8a0948f Michael Hanselmann
1251 a8083063 Iustin Pop
1252 a8083063 Iustin Pop
class LUQueryNodes(NoHooksLU):
1253 a8083063 Iustin Pop
  """Logical unit for querying nodes.
1254 a8083063 Iustin Pop

1255 a8083063 Iustin Pop
  """
1256 246e180a Iustin Pop
  _OP_REQP = ["output_fields", "names"]
1257 a8083063 Iustin Pop
1258 a8083063 Iustin Pop
  def CheckPrereq(self):
1259 a8083063 Iustin Pop
    """Check prerequisites.
1260 a8083063 Iustin Pop

1261 a8083063 Iustin Pop
    This checks that the fields required are valid output fields.
1262 a8083063 Iustin Pop

1263 a8083063 Iustin Pop
    """
1264 e8a4c138 Iustin Pop
    self.dynamic_fields = frozenset([
1265 e8a4c138 Iustin Pop
      "dtotal", "dfree",
1266 e8a4c138 Iustin Pop
      "mtotal", "mnode", "mfree",
1267 e8a4c138 Iustin Pop
      "bootid",
1268 e8a4c138 Iustin Pop
      "ctotal",
1269 e8a4c138 Iustin Pop
      ])
1270 a8083063 Iustin Pop
1271 ec223efb Iustin Pop
    _CheckOutputFields(static=["name", "pinst_cnt", "sinst_cnt",
1272 ec223efb Iustin Pop
                               "pinst_list", "sinst_list",
1273 130a6a6f Iustin Pop
                               "pip", "sip", "tags"],
1274 dcb93971 Michael Hanselmann
                       dynamic=self.dynamic_fields,
1275 dcb93971 Michael Hanselmann
                       selected=self.op.output_fields)
1276 a8083063 Iustin Pop
1277 246e180a Iustin Pop
    self.wanted = _GetWantedNodes(self, self.op.names)
1278 a8083063 Iustin Pop
1279 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
1280 a8083063 Iustin Pop
    """Computes the list of nodes and their attributes.
1281 a8083063 Iustin Pop

1282 a8083063 Iustin Pop
    """
1283 246e180a Iustin Pop
    nodenames = self.wanted
1284 a8083063 Iustin Pop
    nodelist = [self.cfg.GetNodeInfo(name) for name in nodenames]
1285 a8083063 Iustin Pop
1286 a8083063 Iustin Pop
    # begin data gathering
1287 a8083063 Iustin Pop
1288 a8083063 Iustin Pop
    if self.dynamic_fields.intersection(self.op.output_fields):
1289 a8083063 Iustin Pop
      live_data = {}
1290 a8083063 Iustin Pop
      node_data = rpc.call_node_info(nodenames, self.cfg.GetVGName())
1291 a8083063 Iustin Pop
      for name in nodenames:
1292 a8083063 Iustin Pop
        nodeinfo = node_data.get(name, None)
1293 a8083063 Iustin Pop
        if nodeinfo:
1294 a8083063 Iustin Pop
          live_data[name] = {
1295 a8083063 Iustin Pop
            "mtotal": utils.TryConvert(int, nodeinfo['memory_total']),
1296 a8083063 Iustin Pop
            "mnode": utils.TryConvert(int, nodeinfo['memory_dom0']),
1297 a8083063 Iustin Pop
            "mfree": utils.TryConvert(int, nodeinfo['memory_free']),
1298 a8083063 Iustin Pop
            "dtotal": utils.TryConvert(int, nodeinfo['vg_size']),
1299 a8083063 Iustin Pop
            "dfree": utils.TryConvert(int, nodeinfo['vg_free']),
1300 e8a4c138 Iustin Pop
            "ctotal": utils.TryConvert(int, nodeinfo['cpu_total']),
1301 3ef10550 Michael Hanselmann
            "bootid": nodeinfo['bootid'],
1302 a8083063 Iustin Pop
            }
1303 a8083063 Iustin Pop
        else:
1304 a8083063 Iustin Pop
          live_data[name] = {}
1305 a8083063 Iustin Pop
    else:
1306 a8083063 Iustin Pop
      live_data = dict.fromkeys(nodenames, {})
1307 a8083063 Iustin Pop
1308 ec223efb Iustin Pop
    node_to_primary = dict([(name, set()) for name in nodenames])
1309 ec223efb Iustin Pop
    node_to_secondary = dict([(name, set()) for name in nodenames])
1310 a8083063 Iustin Pop
1311 ec223efb Iustin Pop
    inst_fields = frozenset(("pinst_cnt", "pinst_list",
1312 ec223efb Iustin Pop
                             "sinst_cnt", "sinst_list"))
1313 ec223efb Iustin Pop
    if inst_fields & frozenset(self.op.output_fields):
1314 a8083063 Iustin Pop
      instancelist = self.cfg.GetInstanceList()
1315 a8083063 Iustin Pop
1316 ec223efb Iustin Pop
      for instance_name in instancelist:
1317 ec223efb Iustin Pop
        inst = self.cfg.GetInstanceInfo(instance_name)
1318 ec223efb Iustin Pop
        if inst.primary_node in node_to_primary:
1319 ec223efb Iustin Pop
          node_to_primary[inst.primary_node].add(inst.name)
1320 ec223efb Iustin Pop
        for secnode in inst.secondary_nodes:
1321 ec223efb Iustin Pop
          if secnode in node_to_secondary:
1322 ec223efb Iustin Pop
            node_to_secondary[secnode].add(inst.name)
1323 a8083063 Iustin Pop
1324 a8083063 Iustin Pop
    # end data gathering
1325 a8083063 Iustin Pop
1326 a8083063 Iustin Pop
    output = []
1327 a8083063 Iustin Pop
    for node in nodelist:
1328 a8083063 Iustin Pop
      node_output = []
1329 a8083063 Iustin Pop
      for field in self.op.output_fields:
1330 a8083063 Iustin Pop
        if field == "name":
1331 a8083063 Iustin Pop
          val = node.name
1332 ec223efb Iustin Pop
        elif field == "pinst_list":
1333 ec223efb Iustin Pop
          val = list(node_to_primary[node.name])
1334 ec223efb Iustin Pop
        elif field == "sinst_list":
1335 ec223efb Iustin Pop
          val = list(node_to_secondary[node.name])
1336 ec223efb Iustin Pop
        elif field == "pinst_cnt":
1337 ec223efb Iustin Pop
          val = len(node_to_primary[node.name])
1338 ec223efb Iustin Pop
        elif field == "sinst_cnt":
1339 ec223efb Iustin Pop
          val = len(node_to_secondary[node.name])
1340 a8083063 Iustin Pop
        elif field == "pip":
1341 a8083063 Iustin Pop
          val = node.primary_ip
1342 a8083063 Iustin Pop
        elif field == "sip":
1343 a8083063 Iustin Pop
          val = node.secondary_ip
1344 130a6a6f Iustin Pop
        elif field == "tags":
1345 130a6a6f Iustin Pop
          val = list(node.GetTags())
1346 a8083063 Iustin Pop
        elif field in self.dynamic_fields:
1347 ec223efb Iustin Pop
          val = live_data[node.name].get(field, None)
1348 a8083063 Iustin Pop
        else:
1349 3ecf6786 Iustin Pop
          raise errors.ParameterError(field)
1350 a8083063 Iustin Pop
        node_output.append(val)
1351 a8083063 Iustin Pop
      output.append(node_output)
1352 a8083063 Iustin Pop
1353 a8083063 Iustin Pop
    return output
1354 a8083063 Iustin Pop
1355 a8083063 Iustin Pop
1356 dcb93971 Michael Hanselmann
class LUQueryNodeVolumes(NoHooksLU):
1357 dcb93971 Michael Hanselmann
  """Logical unit for getting volumes on node(s).
1358 dcb93971 Michael Hanselmann

1359 dcb93971 Michael Hanselmann
  """
1360 dcb93971 Michael Hanselmann
  _OP_REQP = ["nodes", "output_fields"]
1361 dcb93971 Michael Hanselmann
1362 dcb93971 Michael Hanselmann
  def CheckPrereq(self):
1363 dcb93971 Michael Hanselmann
    """Check prerequisites.
1364 dcb93971 Michael Hanselmann

1365 dcb93971 Michael Hanselmann
    This checks that the fields required are valid output fields.
1366 dcb93971 Michael Hanselmann

1367 dcb93971 Michael Hanselmann
    """
1368 dcb93971 Michael Hanselmann
    self.nodes = _GetWantedNodes(self, self.op.nodes)
1369 dcb93971 Michael Hanselmann
1370 dcb93971 Michael Hanselmann
    _CheckOutputFields(static=["node"],
1371 dcb93971 Michael Hanselmann
                       dynamic=["phys", "vg", "name", "size", "instance"],
1372 dcb93971 Michael Hanselmann
                       selected=self.op.output_fields)
1373 dcb93971 Michael Hanselmann
1374 dcb93971 Michael Hanselmann
1375 dcb93971 Michael Hanselmann
  def Exec(self, feedback_fn):
1376 dcb93971 Michael Hanselmann
    """Computes the list of nodes and their attributes.
1377 dcb93971 Michael Hanselmann

1378 dcb93971 Michael Hanselmann
    """
1379 a7ba5e53 Iustin Pop
    nodenames = self.nodes
1380 dcb93971 Michael Hanselmann
    volumes = rpc.call_node_volumes(nodenames)
1381 dcb93971 Michael Hanselmann
1382 dcb93971 Michael Hanselmann
    ilist = [self.cfg.GetInstanceInfo(iname) for iname
1383 dcb93971 Michael Hanselmann
             in self.cfg.GetInstanceList()]
1384 dcb93971 Michael Hanselmann
1385 dcb93971 Michael Hanselmann
    lv_by_node = dict([(inst, inst.MapLVsByNode()) for inst in ilist])
1386 dcb93971 Michael Hanselmann
1387 dcb93971 Michael Hanselmann
    output = []
1388 dcb93971 Michael Hanselmann
    for node in nodenames:
1389 37d19eb2 Michael Hanselmann
      if node not in volumes or not volumes[node]:
1390 37d19eb2 Michael Hanselmann
        continue
1391 37d19eb2 Michael Hanselmann
1392 dcb93971 Michael Hanselmann
      node_vols = volumes[node][:]
1393 dcb93971 Michael Hanselmann
      node_vols.sort(key=lambda vol: vol['dev'])
1394 dcb93971 Michael Hanselmann
1395 dcb93971 Michael Hanselmann
      for vol in node_vols:
1396 dcb93971 Michael Hanselmann
        node_output = []
1397 dcb93971 Michael Hanselmann
        for field in self.op.output_fields:
1398 dcb93971 Michael Hanselmann
          if field == "node":
1399 dcb93971 Michael Hanselmann
            val = node
1400 dcb93971 Michael Hanselmann
          elif field == "phys":
1401 dcb93971 Michael Hanselmann
            val = vol['dev']
1402 dcb93971 Michael Hanselmann
          elif field == "vg":
1403 dcb93971 Michael Hanselmann
            val = vol['vg']
1404 dcb93971 Michael Hanselmann
          elif field == "name":
1405 dcb93971 Michael Hanselmann
            val = vol['name']
1406 dcb93971 Michael Hanselmann
          elif field == "size":
1407 dcb93971 Michael Hanselmann
            val = int(float(vol['size']))
1408 dcb93971 Michael Hanselmann
          elif field == "instance":
1409 dcb93971 Michael Hanselmann
            for inst in ilist:
1410 dcb93971 Michael Hanselmann
              if node not in lv_by_node[inst]:
1411 dcb93971 Michael Hanselmann
                continue
1412 dcb93971 Michael Hanselmann
              if vol['name'] in lv_by_node[inst][node]:
1413 dcb93971 Michael Hanselmann
                val = inst.name
1414 dcb93971 Michael Hanselmann
                break
1415 dcb93971 Michael Hanselmann
            else:
1416 dcb93971 Michael Hanselmann
              val = '-'
1417 dcb93971 Michael Hanselmann
          else:
1418 3ecf6786 Iustin Pop
            raise errors.ParameterError(field)
1419 dcb93971 Michael Hanselmann
          node_output.append(str(val))
1420 dcb93971 Michael Hanselmann
1421 dcb93971 Michael Hanselmann
        output.append(node_output)
1422 dcb93971 Michael Hanselmann
1423 dcb93971 Michael Hanselmann
    return output
1424 dcb93971 Michael Hanselmann
1425 dcb93971 Michael Hanselmann
1426 a8083063 Iustin Pop
class LUAddNode(LogicalUnit):
1427 a8083063 Iustin Pop
  """Logical unit for adding node to the cluster.
1428 a8083063 Iustin Pop

1429 a8083063 Iustin Pop
  """
1430 a8083063 Iustin Pop
  HPATH = "node-add"
1431 a8083063 Iustin Pop
  HTYPE = constants.HTYPE_NODE
1432 a8083063 Iustin Pop
  _OP_REQP = ["node_name"]
1433 a8083063 Iustin Pop
1434 a8083063 Iustin Pop
  def BuildHooksEnv(self):
1435 a8083063 Iustin Pop
    """Build hooks env.
1436 a8083063 Iustin Pop

1437 a8083063 Iustin Pop
    This will run on all nodes before, and on all nodes + the new node after.
1438 a8083063 Iustin Pop

1439 a8083063 Iustin Pop
    """
1440 a8083063 Iustin Pop
    env = {
1441 0e137c28 Iustin Pop
      "OP_TARGET": self.op.node_name,
1442 a8083063 Iustin Pop
      "NODE_NAME": self.op.node_name,
1443 a8083063 Iustin Pop
      "NODE_PIP": self.op.primary_ip,
1444 a8083063 Iustin Pop
      "NODE_SIP": self.op.secondary_ip,
1445 a8083063 Iustin Pop
      }
1446 a8083063 Iustin Pop
    nodes_0 = self.cfg.GetNodeList()
1447 a8083063 Iustin Pop
    nodes_1 = nodes_0 + [self.op.node_name, ]
1448 a8083063 Iustin Pop
    return env, nodes_0, nodes_1
1449 a8083063 Iustin Pop
1450 a8083063 Iustin Pop
  def CheckPrereq(self):
1451 a8083063 Iustin Pop
    """Check prerequisites.
1452 a8083063 Iustin Pop

1453 a8083063 Iustin Pop
    This checks:
1454 a8083063 Iustin Pop
     - the new node is not already in the config
1455 a8083063 Iustin Pop
     - it is resolvable
1456 a8083063 Iustin Pop
     - its parameters (single/dual homed) matches the cluster
1457 a8083063 Iustin Pop

1458 a8083063 Iustin Pop
    Any errors are signalled by raising errors.OpPrereqError.
1459 a8083063 Iustin Pop

1460 a8083063 Iustin Pop
    """
1461 a8083063 Iustin Pop
    node_name = self.op.node_name
1462 a8083063 Iustin Pop
    cfg = self.cfg
1463 a8083063 Iustin Pop
1464 89e1fc26 Iustin Pop
    dns_data = utils.HostInfo(node_name)
1465 a8083063 Iustin Pop
1466 bcf043c9 Iustin Pop
    node = dns_data.name
1467 bcf043c9 Iustin Pop
    primary_ip = self.op.primary_ip = dns_data.ip
1468 a8083063 Iustin Pop
    secondary_ip = getattr(self.op, "secondary_ip", None)
1469 a8083063 Iustin Pop
    if secondary_ip is None:
1470 a8083063 Iustin Pop
      secondary_ip = primary_ip
1471 a8083063 Iustin Pop
    if not utils.IsValidIP(secondary_ip):
1472 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("Invalid secondary IP given")
1473 a8083063 Iustin Pop
    self.op.secondary_ip = secondary_ip
1474 e7c6e02b Michael Hanselmann
1475 a8083063 Iustin Pop
    node_list = cfg.GetNodeList()
1476 e7c6e02b Michael Hanselmann
    if not self.op.readd and node in node_list:
1477 e7c6e02b Michael Hanselmann
      raise errors.OpPrereqError("Node %s is already in the configuration" %
1478 e7c6e02b Michael Hanselmann
                                 node)
1479 e7c6e02b Michael Hanselmann
    elif self.op.readd and node not in node_list:
1480 e7c6e02b Michael Hanselmann
      raise errors.OpPrereqError("Node %s is not in the configuration" % node)
1481 a8083063 Iustin Pop
1482 a8083063 Iustin Pop
    for existing_node_name in node_list:
1483 a8083063 Iustin Pop
      existing_node = cfg.GetNodeInfo(existing_node_name)
1484 e7c6e02b Michael Hanselmann
1485 e7c6e02b Michael Hanselmann
      if self.op.readd and node == existing_node_name:
1486 e7c6e02b Michael Hanselmann
        if (existing_node.primary_ip != primary_ip or
1487 e7c6e02b Michael Hanselmann
            existing_node.secondary_ip != secondary_ip):
1488 e7c6e02b Michael Hanselmann
          raise errors.OpPrereqError("Readded node doesn't have the same IP"
1489 e7c6e02b Michael Hanselmann
                                     " address configuration as before")
1490 e7c6e02b Michael Hanselmann
        continue
1491 e7c6e02b Michael Hanselmann
1492 a8083063 Iustin Pop
      if (existing_node.primary_ip == primary_ip or
1493 a8083063 Iustin Pop
          existing_node.secondary_ip == primary_ip or
1494 a8083063 Iustin Pop
          existing_node.primary_ip == secondary_ip or
1495 a8083063 Iustin Pop
          existing_node.secondary_ip == secondary_ip):
1496 3ecf6786 Iustin Pop
        raise errors.OpPrereqError("New node ip address(es) conflict with"
1497 3ecf6786 Iustin Pop
                                   " existing node %s" % existing_node.name)
1498 a8083063 Iustin Pop
1499 a8083063 Iustin Pop
    # check that the type of the node (single versus dual homed) is the
1500 a8083063 Iustin Pop
    # same as for the master
1501 880478f8 Iustin Pop
    myself = cfg.GetNodeInfo(self.sstore.GetMasterNode())
1502 a8083063 Iustin Pop
    master_singlehomed = myself.secondary_ip == myself.primary_ip
1503 a8083063 Iustin Pop
    newbie_singlehomed = secondary_ip == primary_ip
1504 a8083063 Iustin Pop
    if master_singlehomed != newbie_singlehomed:
1505 a8083063 Iustin Pop
      if master_singlehomed:
1506 3ecf6786 Iustin Pop
        raise errors.OpPrereqError("The master has no private ip but the"
1507 3ecf6786 Iustin Pop
                                   " new node has one")
1508 a8083063 Iustin Pop
      else:
1509 3ecf6786 Iustin Pop
        raise errors.OpPrereqError("The master has a private ip but the"
1510 3ecf6786 Iustin Pop
                                   " new node doesn't have one")
1511 a8083063 Iustin Pop
1512 a8083063 Iustin Pop
    # checks reachablity
1513 b15d625f Iustin Pop
    if not utils.TcpPing(primary_ip, constants.DEFAULT_NODED_PORT):
1514 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("Node not reachable by ping")
1515 a8083063 Iustin Pop
1516 a8083063 Iustin Pop
    if not newbie_singlehomed:
1517 a8083063 Iustin Pop
      # check reachability from my secondary ip to newbie's secondary ip
1518 b15d625f Iustin Pop
      if not utils.TcpPing(secondary_ip, constants.DEFAULT_NODED_PORT,
1519 b15d625f Iustin Pop
                           source=myself.secondary_ip):
1520 f4bc1f2c Michael Hanselmann
        raise errors.OpPrereqError("Node secondary ip not reachable by TCP"
1521 f4bc1f2c Michael Hanselmann
                                   " based ping to noded port")
1522 a8083063 Iustin Pop
1523 a8083063 Iustin Pop
    self.new_node = objects.Node(name=node,
1524 a8083063 Iustin Pop
                                 primary_ip=primary_ip,
1525 a8083063 Iustin Pop
                                 secondary_ip=secondary_ip)
1526 a8083063 Iustin Pop
1527 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
1528 a8083063 Iustin Pop
    """Adds the new node to the cluster.
1529 a8083063 Iustin Pop

1530 a8083063 Iustin Pop
    """
1531 a8083063 Iustin Pop
    new_node = self.new_node
1532 a8083063 Iustin Pop
    node = new_node.name
1533 a8083063 Iustin Pop
1534 a8083063 Iustin Pop
    # check connectivity
1535 a8083063 Iustin Pop
    result = rpc.call_version([node])[node]
1536 a8083063 Iustin Pop
    if result:
1537 a8083063 Iustin Pop
      if constants.PROTOCOL_VERSION == result:
1538 a8083063 Iustin Pop
        logger.Info("communication to node %s fine, sw version %s match" %
1539 a8083063 Iustin Pop
                    (node, result))
1540 a8083063 Iustin Pop
      else:
1541 3ecf6786 Iustin Pop
        raise errors.OpExecError("Version mismatch master version %s,"
1542 3ecf6786 Iustin Pop
                                 " node version %s" %
1543 3ecf6786 Iustin Pop
                                 (constants.PROTOCOL_VERSION, result))
1544 a8083063 Iustin Pop
    else:
1545 3ecf6786 Iustin Pop
      raise errors.OpExecError("Cannot get version from the new node")
1546 a8083063 Iustin Pop
1547 a8083063 Iustin Pop
    # setup ssh on node
1548 a8083063 Iustin Pop
    logger.Info("copy ssh key to node %s" % node)
1549 70d9e3d8 Iustin Pop
    priv_key, pub_key, _ = ssh.GetUserFiles(constants.GANETI_RUNAS)
1550 a8083063 Iustin Pop
    keyarray = []
1551 70d9e3d8 Iustin Pop
    keyfiles = [constants.SSH_HOST_DSA_PRIV, constants.SSH_HOST_DSA_PUB,
1552 70d9e3d8 Iustin Pop
                constants.SSH_HOST_RSA_PRIV, constants.SSH_HOST_RSA_PUB,
1553 70d9e3d8 Iustin Pop
                priv_key, pub_key]
1554 a8083063 Iustin Pop
1555 a8083063 Iustin Pop
    for i in keyfiles:
1556 a8083063 Iustin Pop
      f = open(i, 'r')
1557 a8083063 Iustin Pop
      try:
1558 a8083063 Iustin Pop
        keyarray.append(f.read())
1559 a8083063 Iustin Pop
      finally:
1560 a8083063 Iustin Pop
        f.close()
1561 a8083063 Iustin Pop
1562 a8083063 Iustin Pop
    result = rpc.call_node_add(node, keyarray[0], keyarray[1], keyarray[2],
1563 a8083063 Iustin Pop
                               keyarray[3], keyarray[4], keyarray[5])
1564 a8083063 Iustin Pop
1565 a8083063 Iustin Pop
    if not result:
1566 3ecf6786 Iustin Pop
      raise errors.OpExecError("Cannot transfer ssh keys to the new node")
1567 a8083063 Iustin Pop
1568 a8083063 Iustin Pop
    # Add node to our /etc/hosts, and add key to known_hosts
1569 d9c02ca6 Michael Hanselmann
    utils.AddHostToEtcHosts(new_node.name)
1570 c8a0948f Michael Hanselmann
1571 a8083063 Iustin Pop
    if new_node.secondary_ip != new_node.primary_ip:
1572 16abfbc2 Alexander Schreiber
      if not rpc.call_node_tcp_ping(new_node.name,
1573 16abfbc2 Alexander Schreiber
                                    constants.LOCALHOST_IP_ADDRESS,
1574 16abfbc2 Alexander Schreiber
                                    new_node.secondary_ip,
1575 16abfbc2 Alexander Schreiber
                                    constants.DEFAULT_NODED_PORT,
1576 16abfbc2 Alexander Schreiber
                                    10, False):
1577 f4bc1f2c Michael Hanselmann
        raise errors.OpExecError("Node claims it doesn't have the secondary ip"
1578 f4bc1f2c Michael Hanselmann
                                 " you gave (%s). Please fix and re-run this"
1579 f4bc1f2c Michael Hanselmann
                                 " command." % new_node.secondary_ip)
1580 a8083063 Iustin Pop
1581 5c0527ed Guido Trotter
    node_verify_list = [self.sstore.GetMasterNode()]
1582 5c0527ed Guido Trotter
    node_verify_param = {
1583 5c0527ed Guido Trotter
      'nodelist': [node],
1584 5c0527ed Guido Trotter
      # TODO: do a node-net-test as well?
1585 5c0527ed Guido Trotter
    }
1586 5c0527ed Guido Trotter
1587 5c0527ed Guido Trotter
    result = rpc.call_node_verify(node_verify_list, node_verify_param)
1588 5c0527ed Guido Trotter
    for verifier in node_verify_list:
1589 5c0527ed Guido Trotter
      if not result[verifier]:
1590 5c0527ed Guido Trotter
        raise errors.OpExecError("Cannot communicate with %s's node daemon"
1591 5c0527ed Guido Trotter
                                 " for remote verification" % verifier)
1592 5c0527ed Guido Trotter
      if result[verifier]['nodelist']:
1593 5c0527ed Guido Trotter
        for failed in result[verifier]['nodelist']:
1594 5c0527ed Guido Trotter
          feedback_fn("ssh/hostname verification failed %s -> %s" %
1595 5c0527ed Guido Trotter
                      (verifier, result[verifier]['nodelist'][failed]))
1596 5c0527ed Guido Trotter
        raise errors.OpExecError("ssh/hostname verification failed.")
1597 ff98055b Iustin Pop
1598 a8083063 Iustin Pop
    # Distribute updated /etc/hosts and known_hosts to all nodes,
1599 a8083063 Iustin Pop
    # including the node just added
1600 880478f8 Iustin Pop
    myself = self.cfg.GetNodeInfo(self.sstore.GetMasterNode())
1601 102b115b Michael Hanselmann
    dist_nodes = self.cfg.GetNodeList()
1602 102b115b Michael Hanselmann
    if not self.op.readd:
1603 102b115b Michael Hanselmann
      dist_nodes.append(node)
1604 a8083063 Iustin Pop
    if myself.name in dist_nodes:
1605 a8083063 Iustin Pop
      dist_nodes.remove(myself.name)
1606 a8083063 Iustin Pop
1607 a8083063 Iustin Pop
    logger.Debug("Copying hosts and known_hosts to all nodes")
1608 107711b0 Michael Hanselmann
    for fname in (constants.ETC_HOSTS, constants.SSH_KNOWN_HOSTS_FILE):
1609 a8083063 Iustin Pop
      result = rpc.call_upload_file(dist_nodes, fname)
1610 a8083063 Iustin Pop
      for to_node in dist_nodes:
1611 a8083063 Iustin Pop
        if not result[to_node]:
1612 a8083063 Iustin Pop
          logger.Error("copy of file %s to node %s failed" %
1613 a8083063 Iustin Pop
                       (fname, to_node))
1614 a8083063 Iustin Pop
1615 3d1e7706 Guido Trotter
    to_copy = self.sstore.GetFileList()
1616 2a6469d5 Alexander Schreiber
    if self.sstore.GetHypervisorType() == constants.HT_XEN_HVM31:
1617 2a6469d5 Alexander Schreiber
      to_copy.append(constants.VNC_PASSWORD_FILE)
1618 a8083063 Iustin Pop
    for fname in to_copy:
1619 b5602d15 Guido Trotter
      result = rpc.call_upload_file([node], fname)
1620 b5602d15 Guido Trotter
      if not result[node]:
1621 a8083063 Iustin Pop
        logger.Error("could not copy file %s to node %s" % (fname, node))
1622 a8083063 Iustin Pop
1623 e7c6e02b Michael Hanselmann
    if not self.op.readd:
1624 e7c6e02b Michael Hanselmann
      logger.Info("adding node %s to cluster.conf" % node)
1625 e7c6e02b Michael Hanselmann
      self.cfg.AddNode(new_node)
1626 a8083063 Iustin Pop
1627 a8083063 Iustin Pop
1628 a8083063 Iustin Pop
class LUMasterFailover(LogicalUnit):
1629 a8083063 Iustin Pop
  """Failover the master node to the current node.
1630 a8083063 Iustin Pop

1631 a8083063 Iustin Pop
  This is a special LU in that it must run on a non-master node.
1632 a8083063 Iustin Pop

1633 a8083063 Iustin Pop
  """
1634 a8083063 Iustin Pop
  HPATH = "master-failover"
1635 a8083063 Iustin Pop
  HTYPE = constants.HTYPE_CLUSTER
1636 a8083063 Iustin Pop
  REQ_MASTER = False
1637 05f86716 Guido Trotter
  REQ_WSSTORE = True
1638 a8083063 Iustin Pop
  _OP_REQP = []
1639 a8083063 Iustin Pop
1640 a8083063 Iustin Pop
  def BuildHooksEnv(self):
1641 a8083063 Iustin Pop
    """Build hooks env.
1642 a8083063 Iustin Pop

1643 a8083063 Iustin Pop
    This will run on the new master only in the pre phase, and on all
1644 a8083063 Iustin Pop
    the nodes in the post phase.
1645 a8083063 Iustin Pop

1646 a8083063 Iustin Pop
    """
1647 a8083063 Iustin Pop
    env = {
1648 0e137c28 Iustin Pop
      "OP_TARGET": self.new_master,
1649 a8083063 Iustin Pop
      "NEW_MASTER": self.new_master,
1650 a8083063 Iustin Pop
      "OLD_MASTER": self.old_master,
1651 a8083063 Iustin Pop
      }
1652 a8083063 Iustin Pop
    return env, [self.new_master], self.cfg.GetNodeList()
1653 a8083063 Iustin Pop
1654 a8083063 Iustin Pop
  def CheckPrereq(self):
1655 a8083063 Iustin Pop
    """Check prerequisites.
1656 a8083063 Iustin Pop

1657 a8083063 Iustin Pop
    This checks that we are not already the master.
1658 a8083063 Iustin Pop

1659 a8083063 Iustin Pop
    """
1660 89e1fc26 Iustin Pop
    self.new_master = utils.HostInfo().name
1661 880478f8 Iustin Pop
    self.old_master = self.sstore.GetMasterNode()
1662 a8083063 Iustin Pop
1663 a8083063 Iustin Pop
    if self.old_master == self.new_master:
1664 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("This commands must be run on the node"
1665 f4bc1f2c Michael Hanselmann
                                 " where you want the new master to be."
1666 f4bc1f2c Michael Hanselmann
                                 " %s is already the master" %
1667 3ecf6786 Iustin Pop
                                 self.old_master)
1668 a8083063 Iustin Pop
1669 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
1670 a8083063 Iustin Pop
    """Failover the master node.
1671 a8083063 Iustin Pop

1672 a8083063 Iustin Pop
    This command, when run on a non-master node, will cause the current
1673 a8083063 Iustin Pop
    master to cease being master, and the non-master to become new
1674 a8083063 Iustin Pop
    master.
1675 a8083063 Iustin Pop

1676 a8083063 Iustin Pop
    """
1677 a8083063 Iustin Pop
    #TODO: do not rely on gethostname returning the FQDN
1678 a8083063 Iustin Pop
    logger.Info("setting master to %s, old master: %s" %
1679 a8083063 Iustin Pop
                (self.new_master, self.old_master))
1680 a8083063 Iustin Pop
1681 a8083063 Iustin Pop
    if not rpc.call_node_stop_master(self.old_master):
1682 a8083063 Iustin Pop
      logger.Error("could disable the master role on the old master"
1683 a8083063 Iustin Pop
                   " %s, please disable manually" % self.old_master)
1684 a8083063 Iustin Pop
1685 880478f8 Iustin Pop
    ss = self.sstore
1686 880478f8 Iustin Pop
    ss.SetKey(ss.SS_MASTER_NODE, self.new_master)
1687 880478f8 Iustin Pop
    if not rpc.call_upload_file(self.cfg.GetNodeList(),
1688 880478f8 Iustin Pop
                                ss.KeyToFilename(ss.SS_MASTER_NODE)):
1689 880478f8 Iustin Pop
      logger.Error("could not distribute the new simple store master file"
1690 880478f8 Iustin Pop
                   " to the other nodes, please check.")
1691 880478f8 Iustin Pop
1692 a8083063 Iustin Pop
    if not rpc.call_node_start_master(self.new_master):
1693 a8083063 Iustin Pop
      logger.Error("could not start the master role on the new master"
1694 a8083063 Iustin Pop
                   " %s, please check" % self.new_master)
1695 f4bc1f2c Michael Hanselmann
      feedback_fn("Error in activating the master IP on the new master,"
1696 f4bc1f2c Michael Hanselmann
                  " please fix manually.")
1697 a8083063 Iustin Pop
1698 a8083063 Iustin Pop
1699 a8083063 Iustin Pop
1700 a8083063 Iustin Pop
class LUQueryClusterInfo(NoHooksLU):
1701 a8083063 Iustin Pop
  """Query cluster configuration.
1702 a8083063 Iustin Pop

1703 a8083063 Iustin Pop
  """
1704 a8083063 Iustin Pop
  _OP_REQP = []
1705 59322403 Iustin Pop
  REQ_MASTER = False
1706 a8083063 Iustin Pop
1707 a8083063 Iustin Pop
  def CheckPrereq(self):
1708 a8083063 Iustin Pop
    """No prerequsites needed for this LU.
1709 a8083063 Iustin Pop

1710 a8083063 Iustin Pop
    """
1711 a8083063 Iustin Pop
    pass
1712 a8083063 Iustin Pop
1713 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
1714 a8083063 Iustin Pop
    """Return cluster config.
1715 a8083063 Iustin Pop

1716 a8083063 Iustin Pop
    """
1717 a8083063 Iustin Pop
    result = {
1718 5fcdc80d Iustin Pop
      "name": self.sstore.GetClusterName(),
1719 a8083063 Iustin Pop
      "software_version": constants.RELEASE_VERSION,
1720 a8083063 Iustin Pop
      "protocol_version": constants.PROTOCOL_VERSION,
1721 a8083063 Iustin Pop
      "config_version": constants.CONFIG_VERSION,
1722 a8083063 Iustin Pop
      "os_api_version": constants.OS_API_VERSION,
1723 a8083063 Iustin Pop
      "export_version": constants.EXPORT_VERSION,
1724 880478f8 Iustin Pop
      "master": self.sstore.GetMasterNode(),
1725 a8083063 Iustin Pop
      "architecture": (platform.architecture()[0], platform.machine()),
1726 8a12ce45 Iustin Pop
      "hypervisor_type": self.sstore.GetHypervisorType(),
1727 a8083063 Iustin Pop
      }
1728 a8083063 Iustin Pop
1729 a8083063 Iustin Pop
    return result
1730 a8083063 Iustin Pop
1731 a8083063 Iustin Pop
1732 a8083063 Iustin Pop
class LUDumpClusterConfig(NoHooksLU):
1733 a8083063 Iustin Pop
  """Return a text-representation of the cluster-config.
1734 a8083063 Iustin Pop

1735 a8083063 Iustin Pop
  """
1736 a8083063 Iustin Pop
  _OP_REQP = []
1737 a8083063 Iustin Pop
1738 a8083063 Iustin Pop
  def CheckPrereq(self):
1739 a8083063 Iustin Pop
    """No prerequisites.
1740 a8083063 Iustin Pop

1741 a8083063 Iustin Pop
    """
1742 a8083063 Iustin Pop
    pass
1743 a8083063 Iustin Pop
1744 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
1745 a8083063 Iustin Pop
    """Dump a representation of the cluster config to the standard output.
1746 a8083063 Iustin Pop

1747 a8083063 Iustin Pop
    """
1748 a8083063 Iustin Pop
    return self.cfg.DumpConfig()
1749 a8083063 Iustin Pop
1750 a8083063 Iustin Pop
1751 a8083063 Iustin Pop
class LUActivateInstanceDisks(NoHooksLU):
1752 a8083063 Iustin Pop
  """Bring up an instance's disks.
1753 a8083063 Iustin Pop

1754 a8083063 Iustin Pop
  """
1755 a8083063 Iustin Pop
  _OP_REQP = ["instance_name"]
1756 a8083063 Iustin Pop
1757 a8083063 Iustin Pop
  def CheckPrereq(self):
1758 a8083063 Iustin Pop
    """Check prerequisites.
1759 a8083063 Iustin Pop

1760 a8083063 Iustin Pop
    This checks that the instance is in the cluster.
1761 a8083063 Iustin Pop

1762 a8083063 Iustin Pop
    """
1763 a8083063 Iustin Pop
    instance = self.cfg.GetInstanceInfo(
1764 a8083063 Iustin Pop
      self.cfg.ExpandInstanceName(self.op.instance_name))
1765 a8083063 Iustin Pop
    if instance is None:
1766 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("Instance '%s' not known" %
1767 3ecf6786 Iustin Pop
                                 self.op.instance_name)
1768 a8083063 Iustin Pop
    self.instance = instance
1769 a8083063 Iustin Pop
1770 a8083063 Iustin Pop
1771 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
1772 a8083063 Iustin Pop
    """Activate the disks.
1773 a8083063 Iustin Pop

1774 a8083063 Iustin Pop
    """
1775 a8083063 Iustin Pop
    disks_ok, disks_info = _AssembleInstanceDisks(self.instance, self.cfg)
1776 a8083063 Iustin Pop
    if not disks_ok:
1777 3ecf6786 Iustin Pop
      raise errors.OpExecError("Cannot activate block devices")
1778 a8083063 Iustin Pop
1779 a8083063 Iustin Pop
    return disks_info
1780 a8083063 Iustin Pop
1781 a8083063 Iustin Pop
1782 a8083063 Iustin Pop
def _AssembleInstanceDisks(instance, cfg, ignore_secondaries=False):
1783 a8083063 Iustin Pop
  """Prepare the block devices for an instance.
1784 a8083063 Iustin Pop

1785 a8083063 Iustin Pop
  This sets up the block devices on all nodes.
1786 a8083063 Iustin Pop

1787 a8083063 Iustin Pop
  Args:
1788 a8083063 Iustin Pop
    instance: a ganeti.objects.Instance object
1789 a8083063 Iustin Pop
    ignore_secondaries: if true, errors on secondary nodes won't result
1790 a8083063 Iustin Pop
                        in an error return from the function
1791 a8083063 Iustin Pop

1792 a8083063 Iustin Pop
  Returns:
1793 a8083063 Iustin Pop
    false if the operation failed
1794 a8083063 Iustin Pop
    list of (host, instance_visible_name, node_visible_name) if the operation
1795 a8083063 Iustin Pop
         suceeded with the mapping from node devices to instance devices
1796 a8083063 Iustin Pop
  """
1797 a8083063 Iustin Pop
  device_info = []
1798 a8083063 Iustin Pop
  disks_ok = True
1799 fdbd668d Iustin Pop
  iname = instance.name
1800 fdbd668d Iustin Pop
  # With the two passes mechanism we try to reduce the window of
1801 fdbd668d Iustin Pop
  # opportunity for the race condition of switching DRBD to primary
1802 fdbd668d Iustin Pop
  # before handshaking occured, but we do not eliminate it
1803 fdbd668d Iustin Pop
1804 fdbd668d Iustin Pop
  # The proper fix would be to wait (with some limits) until the
1805 fdbd668d Iustin Pop
  # connection has been made and drbd transitions from WFConnection
1806 fdbd668d Iustin Pop
  # into any other network-connected state (Connected, SyncTarget,
1807 fdbd668d Iustin Pop
  # SyncSource, etc.)
1808 fdbd668d Iustin Pop
1809 fdbd668d Iustin Pop
  # 1st pass, assemble on all nodes in secondary mode
1810 a8083063 Iustin Pop
  for inst_disk in instance.disks:
1811 a8083063 Iustin Pop
    for node, node_disk in inst_disk.ComputeNodeTree(instance.primary_node):
1812 a8083063 Iustin Pop
      cfg.SetDiskID(node_disk, node)
1813 fdbd668d Iustin Pop
      result = rpc.call_blockdev_assemble(node, node_disk, iname, False)
1814 a8083063 Iustin Pop
      if not result:
1815 f4bc1f2c Michael Hanselmann
        logger.Error("could not prepare block device %s on node %s"
1816 fdbd668d Iustin Pop
                     " (is_primary=False, pass=1)" % (inst_disk.iv_name, node))
1817 fdbd668d Iustin Pop
        if not ignore_secondaries:
1818 a8083063 Iustin Pop
          disks_ok = False
1819 fdbd668d Iustin Pop
1820 fdbd668d Iustin Pop
  # FIXME: race condition on drbd migration to primary
1821 fdbd668d Iustin Pop
1822 fdbd668d Iustin Pop
  # 2nd pass, do only the primary node
1823 fdbd668d Iustin Pop
  for inst_disk in instance.disks:
1824 fdbd668d Iustin Pop
    for node, node_disk in inst_disk.ComputeNodeTree(instance.primary_node):
1825 fdbd668d Iustin Pop
      if node != instance.primary_node:
1826 fdbd668d Iustin Pop
        continue
1827 fdbd668d Iustin Pop
      cfg.SetDiskID(node_disk, node)
1828 fdbd668d Iustin Pop
      result = rpc.call_blockdev_assemble(node, node_disk, iname, True)
1829 fdbd668d Iustin Pop
      if not result:
1830 fdbd668d Iustin Pop
        logger.Error("could not prepare block device %s on node %s"
1831 fdbd668d Iustin Pop
                     " (is_primary=True, pass=2)" % (inst_disk.iv_name, node))
1832 fdbd668d Iustin Pop
        disks_ok = False
1833 fdbd668d Iustin Pop
    device_info.append((instance.primary_node, inst_disk.iv_name, result))
1834 a8083063 Iustin Pop
1835 b352ab5b Iustin Pop
  # leave the disks configured for the primary node
1836 b352ab5b Iustin Pop
  # this is a workaround that would be fixed better by
1837 b352ab5b Iustin Pop
  # improving the logical/physical id handling
1838 b352ab5b Iustin Pop
  for disk in instance.disks:
1839 b352ab5b Iustin Pop
    cfg.SetDiskID(disk, instance.primary_node)
1840 b352ab5b Iustin Pop
1841 a8083063 Iustin Pop
  return disks_ok, device_info
1842 a8083063 Iustin Pop
1843 a8083063 Iustin Pop
1844 fe7b0351 Michael Hanselmann
def _StartInstanceDisks(cfg, instance, force):
1845 3ecf6786 Iustin Pop
  """Start the disks of an instance.
1846 3ecf6786 Iustin Pop

1847 3ecf6786 Iustin Pop
  """
1848 fe7b0351 Michael Hanselmann
  disks_ok, dummy = _AssembleInstanceDisks(instance, cfg,
1849 fe7b0351 Michael Hanselmann
                                           ignore_secondaries=force)
1850 fe7b0351 Michael Hanselmann
  if not disks_ok:
1851 fe7b0351 Michael Hanselmann
    _ShutdownInstanceDisks(instance, cfg)
1852 fe7b0351 Michael Hanselmann
    if force is not None and not force:
1853 fe7b0351 Michael Hanselmann
      logger.Error("If the message above refers to a secondary node,"
1854 fe7b0351 Michael Hanselmann
                   " you can retry the operation using '--force'.")
1855 3ecf6786 Iustin Pop
    raise errors.OpExecError("Disk consistency error")
1856 fe7b0351 Michael Hanselmann
1857 fe7b0351 Michael Hanselmann
1858 a8083063 Iustin Pop
class LUDeactivateInstanceDisks(NoHooksLU):
1859 a8083063 Iustin Pop
  """Shutdown an instance's disks.
1860 a8083063 Iustin Pop

1861 a8083063 Iustin Pop
  """
1862 a8083063 Iustin Pop
  _OP_REQP = ["instance_name"]
1863 a8083063 Iustin Pop
1864 a8083063 Iustin Pop
  def CheckPrereq(self):
1865 a8083063 Iustin Pop
    """Check prerequisites.
1866 a8083063 Iustin Pop

1867 a8083063 Iustin Pop
    This checks that the instance is in the cluster.
1868 a8083063 Iustin Pop

1869 a8083063 Iustin Pop
    """
1870 a8083063 Iustin Pop
    instance = self.cfg.GetInstanceInfo(
1871 a8083063 Iustin Pop
      self.cfg.ExpandInstanceName(self.op.instance_name))
1872 a8083063 Iustin Pop
    if instance is None:
1873 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("Instance '%s' not known" %
1874 3ecf6786 Iustin Pop
                                 self.op.instance_name)
1875 a8083063 Iustin Pop
    self.instance = instance
1876 a8083063 Iustin Pop
1877 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
1878 a8083063 Iustin Pop
    """Deactivate the disks
1879 a8083063 Iustin Pop

1880 a8083063 Iustin Pop
    """
1881 a8083063 Iustin Pop
    instance = self.instance
1882 a8083063 Iustin Pop
    ins_l = rpc.call_instance_list([instance.primary_node])
1883 a8083063 Iustin Pop
    ins_l = ins_l[instance.primary_node]
1884 a8083063 Iustin Pop
    if not type(ins_l) is list:
1885 3ecf6786 Iustin Pop
      raise errors.OpExecError("Can't contact node '%s'" %
1886 3ecf6786 Iustin Pop
                               instance.primary_node)
1887 a8083063 Iustin Pop
1888 a8083063 Iustin Pop
    if self.instance.name in ins_l:
1889 3ecf6786 Iustin Pop
      raise errors.OpExecError("Instance is running, can't shutdown"
1890 3ecf6786 Iustin Pop
                               " block devices.")
1891 a8083063 Iustin Pop
1892 a8083063 Iustin Pop
    _ShutdownInstanceDisks(instance, self.cfg)
1893 a8083063 Iustin Pop
1894 a8083063 Iustin Pop
1895 a8083063 Iustin Pop
def _ShutdownInstanceDisks(instance, cfg, ignore_primary=False):
1896 a8083063 Iustin Pop
  """Shutdown block devices of an instance.
1897 a8083063 Iustin Pop

1898 a8083063 Iustin Pop
  This does the shutdown on all nodes of the instance.
1899 a8083063 Iustin Pop

1900 a8083063 Iustin Pop
  If the ignore_primary is false, errors on the primary node are
1901 a8083063 Iustin Pop
  ignored.
1902 a8083063 Iustin Pop

1903 a8083063 Iustin Pop
  """
1904 a8083063 Iustin Pop
  result = True
1905 a8083063 Iustin Pop
  for disk in instance.disks:
1906 a8083063 Iustin Pop
    for node, top_disk in disk.ComputeNodeTree(instance.primary_node):
1907 a8083063 Iustin Pop
      cfg.SetDiskID(top_disk, node)
1908 a8083063 Iustin Pop
      if not rpc.call_blockdev_shutdown(node, top_disk):
1909 a8083063 Iustin Pop
        logger.Error("could not shutdown block device %s on node %s" %
1910 a8083063 Iustin Pop
                     (disk.iv_name, node))
1911 a8083063 Iustin Pop
        if not ignore_primary or node != instance.primary_node:
1912 a8083063 Iustin Pop
          result = False
1913 a8083063 Iustin Pop
  return result
1914 a8083063 Iustin Pop
1915 a8083063 Iustin Pop
1916 d4f16fd9 Iustin Pop
def _CheckNodeFreeMemory(cfg, node, reason, requested):
1917 d4f16fd9 Iustin Pop
  """Checks if a node has enough free memory.
1918 d4f16fd9 Iustin Pop

1919 d4f16fd9 Iustin Pop
  This function check if a given node has the needed amount of free
1920 d4f16fd9 Iustin Pop
  memory. In case the node has less memory or we cannot get the
1921 d4f16fd9 Iustin Pop
  information from the node, this function raise an OpPrereqError
1922 d4f16fd9 Iustin Pop
  exception.
1923 d4f16fd9 Iustin Pop

1924 d4f16fd9 Iustin Pop
  Args:
1925 d4f16fd9 Iustin Pop
    - cfg: a ConfigWriter instance
1926 d4f16fd9 Iustin Pop
    - node: the node name
1927 d4f16fd9 Iustin Pop
    - reason: string to use in the error message
1928 d4f16fd9 Iustin Pop
    - requested: the amount of memory in MiB
1929 d4f16fd9 Iustin Pop

1930 d4f16fd9 Iustin Pop
  """
1931 d4f16fd9 Iustin Pop
  nodeinfo = rpc.call_node_info([node], cfg.GetVGName())
1932 d4f16fd9 Iustin Pop
  if not nodeinfo or not isinstance(nodeinfo, dict):
1933 d4f16fd9 Iustin Pop
    raise errors.OpPrereqError("Could not contact node %s for resource"
1934 d4f16fd9 Iustin Pop
                             " information" % (node,))
1935 d4f16fd9 Iustin Pop
1936 d4f16fd9 Iustin Pop
  free_mem = nodeinfo[node].get('memory_free')
1937 d4f16fd9 Iustin Pop
  if not isinstance(free_mem, int):
1938 d4f16fd9 Iustin Pop
    raise errors.OpPrereqError("Can't compute free memory on node %s, result"
1939 d4f16fd9 Iustin Pop
                             " was '%s'" % (node, free_mem))
1940 d4f16fd9 Iustin Pop
  if requested > free_mem:
1941 d4f16fd9 Iustin Pop
    raise errors.OpPrereqError("Not enough memory on node %s for %s:"
1942 d4f16fd9 Iustin Pop
                             " needed %s MiB, available %s MiB" %
1943 d4f16fd9 Iustin Pop
                             (node, reason, requested, free_mem))
1944 d4f16fd9 Iustin Pop
1945 d4f16fd9 Iustin Pop
1946 a8083063 Iustin Pop
class LUStartupInstance(LogicalUnit):
1947 a8083063 Iustin Pop
  """Starts an instance.
1948 a8083063 Iustin Pop

1949 a8083063 Iustin Pop
  """
1950 a8083063 Iustin Pop
  HPATH = "instance-start"
1951 a8083063 Iustin Pop
  HTYPE = constants.HTYPE_INSTANCE
1952 a8083063 Iustin Pop
  _OP_REQP = ["instance_name", "force"]
1953 a8083063 Iustin Pop
1954 a8083063 Iustin Pop
  def BuildHooksEnv(self):
1955 a8083063 Iustin Pop
    """Build hooks env.
1956 a8083063 Iustin Pop

1957 a8083063 Iustin Pop
    This runs on master, primary and secondary nodes of the instance.
1958 a8083063 Iustin Pop

1959 a8083063 Iustin Pop
    """
1960 a8083063 Iustin Pop
    env = {
1961 a8083063 Iustin Pop
      "FORCE": self.op.force,
1962 a8083063 Iustin Pop
      }
1963 396e1b78 Michael Hanselmann
    env.update(_BuildInstanceHookEnvByObject(self.instance))
1964 880478f8 Iustin Pop
    nl = ([self.sstore.GetMasterNode(), self.instance.primary_node] +
1965 a8083063 Iustin Pop
          list(self.instance.secondary_nodes))
1966 a8083063 Iustin Pop
    return env, nl, nl
1967 a8083063 Iustin Pop
1968 a8083063 Iustin Pop
  def CheckPrereq(self):
1969 a8083063 Iustin Pop
    """Check prerequisites.
1970 a8083063 Iustin Pop

1971 a8083063 Iustin Pop
    This checks that the instance is in the cluster.
1972 a8083063 Iustin Pop

1973 a8083063 Iustin Pop
    """
1974 a8083063 Iustin Pop
    instance = self.cfg.GetInstanceInfo(
1975 a8083063 Iustin Pop
      self.cfg.ExpandInstanceName(self.op.instance_name))
1976 a8083063 Iustin Pop
    if instance is None:
1977 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("Instance '%s' not known" %
1978 3ecf6786 Iustin Pop
                                 self.op.instance_name)
1979 a8083063 Iustin Pop
1980 a8083063 Iustin Pop
    # check bridges existance
1981 bf6929a2 Alexander Schreiber
    _CheckInstanceBridgesExist(instance)
1982 a8083063 Iustin Pop
1983 d4f16fd9 Iustin Pop
    _CheckNodeFreeMemory(self.cfg, instance.primary_node,
1984 d4f16fd9 Iustin Pop
                         "starting instance %s" % instance.name,
1985 d4f16fd9 Iustin Pop
                         instance.memory)
1986 d4f16fd9 Iustin Pop
1987 a8083063 Iustin Pop
    self.instance = instance
1988 a8083063 Iustin Pop
    self.op.instance_name = instance.name
1989 a8083063 Iustin Pop
1990 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
1991 a8083063 Iustin Pop
    """Start the instance.
1992 a8083063 Iustin Pop

1993 a8083063 Iustin Pop
    """
1994 a8083063 Iustin Pop
    instance = self.instance
1995 a8083063 Iustin Pop
    force = self.op.force
1996 a8083063 Iustin Pop
    extra_args = getattr(self.op, "extra_args", "")
1997 a8083063 Iustin Pop
1998 fe482621 Iustin Pop
    self.cfg.MarkInstanceUp(instance.name)
1999 fe482621 Iustin Pop
2000 a8083063 Iustin Pop
    node_current = instance.primary_node
2001 a8083063 Iustin Pop
2002 fe7b0351 Michael Hanselmann
    _StartInstanceDisks(self.cfg, instance, force)
2003 a8083063 Iustin Pop
2004 a8083063 Iustin Pop
    if not rpc.call_instance_start(node_current, instance, extra_args):
2005 a8083063 Iustin Pop
      _ShutdownInstanceDisks(instance, self.cfg)
2006 3ecf6786 Iustin Pop
      raise errors.OpExecError("Could not start instance")
2007 a8083063 Iustin Pop
2008 a8083063 Iustin Pop
2009 bf6929a2 Alexander Schreiber
class LURebootInstance(LogicalUnit):
2010 bf6929a2 Alexander Schreiber
  """Reboot an instance.
2011 bf6929a2 Alexander Schreiber

2012 bf6929a2 Alexander Schreiber
  """
2013 bf6929a2 Alexander Schreiber
  HPATH = "instance-reboot"
2014 bf6929a2 Alexander Schreiber
  HTYPE = constants.HTYPE_INSTANCE
2015 bf6929a2 Alexander Schreiber
  _OP_REQP = ["instance_name", "ignore_secondaries", "reboot_type"]
2016 bf6929a2 Alexander Schreiber
2017 bf6929a2 Alexander Schreiber
  def BuildHooksEnv(self):
2018 bf6929a2 Alexander Schreiber
    """Build hooks env.
2019 bf6929a2 Alexander Schreiber

2020 bf6929a2 Alexander Schreiber
    This runs on master, primary and secondary nodes of the instance.
2021 bf6929a2 Alexander Schreiber

2022 bf6929a2 Alexander Schreiber
    """
2023 bf6929a2 Alexander Schreiber
    env = {
2024 bf6929a2 Alexander Schreiber
      "IGNORE_SECONDARIES": self.op.ignore_secondaries,
2025 bf6929a2 Alexander Schreiber
      }
2026 bf6929a2 Alexander Schreiber
    env.update(_BuildInstanceHookEnvByObject(self.instance))
2027 bf6929a2 Alexander Schreiber
    nl = ([self.sstore.GetMasterNode(), self.instance.primary_node] +
2028 bf6929a2 Alexander Schreiber
          list(self.instance.secondary_nodes))
2029 bf6929a2 Alexander Schreiber
    return env, nl, nl
2030 bf6929a2 Alexander Schreiber
2031 bf6929a2 Alexander Schreiber
  def CheckPrereq(self):
2032 bf6929a2 Alexander Schreiber
    """Check prerequisites.
2033 bf6929a2 Alexander Schreiber

2034 bf6929a2 Alexander Schreiber
    This checks that the instance is in the cluster.
2035 bf6929a2 Alexander Schreiber

2036 bf6929a2 Alexander Schreiber
    """
2037 bf6929a2 Alexander Schreiber
    instance = self.cfg.GetInstanceInfo(
2038 bf6929a2 Alexander Schreiber
      self.cfg.ExpandInstanceName(self.op.instance_name))
2039 bf6929a2 Alexander Schreiber
    if instance is None:
2040 bf6929a2 Alexander Schreiber
      raise errors.OpPrereqError("Instance '%s' not known" %
2041 bf6929a2 Alexander Schreiber
                                 self.op.instance_name)
2042 bf6929a2 Alexander Schreiber
2043 bf6929a2 Alexander Schreiber
    # check bridges existance
2044 bf6929a2 Alexander Schreiber
    _CheckInstanceBridgesExist(instance)
2045 bf6929a2 Alexander Schreiber
2046 bf6929a2 Alexander Schreiber
    self.instance = instance
2047 bf6929a2 Alexander Schreiber
    self.op.instance_name = instance.name
2048 bf6929a2 Alexander Schreiber
2049 bf6929a2 Alexander Schreiber
  def Exec(self, feedback_fn):
2050 bf6929a2 Alexander Schreiber
    """Reboot the instance.
2051 bf6929a2 Alexander Schreiber

2052 bf6929a2 Alexander Schreiber
    """
2053 bf6929a2 Alexander Schreiber
    instance = self.instance
2054 bf6929a2 Alexander Schreiber
    ignore_secondaries = self.op.ignore_secondaries
2055 bf6929a2 Alexander Schreiber
    reboot_type = self.op.reboot_type
2056 bf6929a2 Alexander Schreiber
    extra_args = getattr(self.op, "extra_args", "")
2057 bf6929a2 Alexander Schreiber
2058 bf6929a2 Alexander Schreiber
    node_current = instance.primary_node
2059 bf6929a2 Alexander Schreiber
2060 bf6929a2 Alexander Schreiber
    if reboot_type not in [constants.INSTANCE_REBOOT_SOFT,
2061 bf6929a2 Alexander Schreiber
                           constants.INSTANCE_REBOOT_HARD,
2062 bf6929a2 Alexander Schreiber
                           constants.INSTANCE_REBOOT_FULL]:
2063 bf6929a2 Alexander Schreiber
      raise errors.ParameterError("reboot type not in [%s, %s, %s]" %
2064 bf6929a2 Alexander Schreiber
                                  (constants.INSTANCE_REBOOT_SOFT,
2065 bf6929a2 Alexander Schreiber
                                   constants.INSTANCE_REBOOT_HARD,
2066 bf6929a2 Alexander Schreiber
                                   constants.INSTANCE_REBOOT_FULL))
2067 bf6929a2 Alexander Schreiber
2068 bf6929a2 Alexander Schreiber
    if reboot_type in [constants.INSTANCE_REBOOT_SOFT,
2069 bf6929a2 Alexander Schreiber
                       constants.INSTANCE_REBOOT_HARD]:
2070 bf6929a2 Alexander Schreiber
      if not rpc.call_instance_reboot(node_current, instance,
2071 bf6929a2 Alexander Schreiber
                                      reboot_type, extra_args):
2072 bf6929a2 Alexander Schreiber
        raise errors.OpExecError("Could not reboot instance")
2073 bf6929a2 Alexander Schreiber
    else:
2074 bf6929a2 Alexander Schreiber
      if not rpc.call_instance_shutdown(node_current, instance):
2075 bf6929a2 Alexander Schreiber
        raise errors.OpExecError("could not shutdown instance for full reboot")
2076 bf6929a2 Alexander Schreiber
      _ShutdownInstanceDisks(instance, self.cfg)
2077 bf6929a2 Alexander Schreiber
      _StartInstanceDisks(self.cfg, instance, ignore_secondaries)
2078 bf6929a2 Alexander Schreiber
      if not rpc.call_instance_start(node_current, instance, extra_args):
2079 bf6929a2 Alexander Schreiber
        _ShutdownInstanceDisks(instance, self.cfg)
2080 bf6929a2 Alexander Schreiber
        raise errors.OpExecError("Could not start instance for full reboot")
2081 bf6929a2 Alexander Schreiber
2082 bf6929a2 Alexander Schreiber
    self.cfg.MarkInstanceUp(instance.name)
2083 bf6929a2 Alexander Schreiber
2084 bf6929a2 Alexander Schreiber
2085 a8083063 Iustin Pop
class LUShutdownInstance(LogicalUnit):
2086 a8083063 Iustin Pop
  """Shutdown an instance.
2087 a8083063 Iustin Pop

2088 a8083063 Iustin Pop
  """
2089 a8083063 Iustin Pop
  HPATH = "instance-stop"
2090 a8083063 Iustin Pop
  HTYPE = constants.HTYPE_INSTANCE
2091 a8083063 Iustin Pop
  _OP_REQP = ["instance_name"]
2092 a8083063 Iustin Pop
2093 a8083063 Iustin Pop
  def BuildHooksEnv(self):
2094 a8083063 Iustin Pop
    """Build hooks env.
2095 a8083063 Iustin Pop

2096 a8083063 Iustin Pop
    This runs on master, primary and secondary nodes of the instance.
2097 a8083063 Iustin Pop

2098 a8083063 Iustin Pop
    """
2099 396e1b78 Michael Hanselmann
    env = _BuildInstanceHookEnvByObject(self.instance)
2100 880478f8 Iustin Pop
    nl = ([self.sstore.GetMasterNode(), self.instance.primary_node] +
2101 a8083063 Iustin Pop
          list(self.instance.secondary_nodes))
2102 a8083063 Iustin Pop
    return env, nl, nl
2103 a8083063 Iustin Pop
2104 a8083063 Iustin Pop
  def CheckPrereq(self):
2105 a8083063 Iustin Pop
    """Check prerequisites.
2106 a8083063 Iustin Pop

2107 a8083063 Iustin Pop
    This checks that the instance is in the cluster.
2108 a8083063 Iustin Pop

2109 a8083063 Iustin Pop
    """
2110 a8083063 Iustin Pop
    instance = self.cfg.GetInstanceInfo(
2111 a8083063 Iustin Pop
      self.cfg.ExpandInstanceName(self.op.instance_name))
2112 a8083063 Iustin Pop
    if instance is None:
2113 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("Instance '%s' not known" %
2114 3ecf6786 Iustin Pop
                                 self.op.instance_name)
2115 a8083063 Iustin Pop
    self.instance = instance
2116 a8083063 Iustin Pop
2117 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
2118 a8083063 Iustin Pop
    """Shutdown the instance.
2119 a8083063 Iustin Pop

2120 a8083063 Iustin Pop
    """
2121 a8083063 Iustin Pop
    instance = self.instance
2122 a8083063 Iustin Pop
    node_current = instance.primary_node
2123 fe482621 Iustin Pop
    self.cfg.MarkInstanceDown(instance.name)
2124 a8083063 Iustin Pop
    if not rpc.call_instance_shutdown(node_current, instance):
2125 a8083063 Iustin Pop
      logger.Error("could not shutdown instance")
2126 a8083063 Iustin Pop
2127 a8083063 Iustin Pop
    _ShutdownInstanceDisks(instance, self.cfg)
2128 a8083063 Iustin Pop
2129 a8083063 Iustin Pop
2130 fe7b0351 Michael Hanselmann
class LUReinstallInstance(LogicalUnit):
2131 fe7b0351 Michael Hanselmann
  """Reinstall an instance.
2132 fe7b0351 Michael Hanselmann

2133 fe7b0351 Michael Hanselmann
  """
2134 fe7b0351 Michael Hanselmann
  HPATH = "instance-reinstall"
2135 fe7b0351 Michael Hanselmann
  HTYPE = constants.HTYPE_INSTANCE
2136 fe7b0351 Michael Hanselmann
  _OP_REQP = ["instance_name"]
2137 fe7b0351 Michael Hanselmann
2138 fe7b0351 Michael Hanselmann
  def BuildHooksEnv(self):
2139 fe7b0351 Michael Hanselmann
    """Build hooks env.
2140 fe7b0351 Michael Hanselmann

2141 fe7b0351 Michael Hanselmann
    This runs on master, primary and secondary nodes of the instance.
2142 fe7b0351 Michael Hanselmann

2143 fe7b0351 Michael Hanselmann
    """
2144 396e1b78 Michael Hanselmann
    env = _BuildInstanceHookEnvByObject(self.instance)
2145 fe7b0351 Michael Hanselmann
    nl = ([self.sstore.GetMasterNode(), self.instance.primary_node] +
2146 fe7b0351 Michael Hanselmann
          list(self.instance.secondary_nodes))
2147 fe7b0351 Michael Hanselmann
    return env, nl, nl
2148 fe7b0351 Michael Hanselmann
2149 fe7b0351 Michael Hanselmann
  def CheckPrereq(self):
2150 fe7b0351 Michael Hanselmann
    """Check prerequisites.
2151 fe7b0351 Michael Hanselmann

2152 fe7b0351 Michael Hanselmann
    This checks that the instance is in the cluster and is not running.
2153 fe7b0351 Michael Hanselmann

2154 fe7b0351 Michael Hanselmann
    """
2155 fe7b0351 Michael Hanselmann
    instance = self.cfg.GetInstanceInfo(
2156 fe7b0351 Michael Hanselmann
      self.cfg.ExpandInstanceName(self.op.instance_name))
2157 fe7b0351 Michael Hanselmann
    if instance is None:
2158 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("Instance '%s' not known" %
2159 3ecf6786 Iustin Pop
                                 self.op.instance_name)
2160 fe7b0351 Michael Hanselmann
    if instance.disk_template == constants.DT_DISKLESS:
2161 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("Instance '%s' has no disks" %
2162 3ecf6786 Iustin Pop
                                 self.op.instance_name)
2163 fe7b0351 Michael Hanselmann
    if instance.status != "down":
2164 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("Instance '%s' is marked to be up" %
2165 3ecf6786 Iustin Pop
                                 self.op.instance_name)
2166 fe7b0351 Michael Hanselmann
    remote_info = rpc.call_instance_info(instance.primary_node, instance.name)
2167 fe7b0351 Michael Hanselmann
    if remote_info:
2168 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("Instance '%s' is running on the node %s" %
2169 3ecf6786 Iustin Pop
                                 (self.op.instance_name,
2170 3ecf6786 Iustin Pop
                                  instance.primary_node))
2171 d0834de3 Michael Hanselmann
2172 d0834de3 Michael Hanselmann
    self.op.os_type = getattr(self.op, "os_type", None)
2173 d0834de3 Michael Hanselmann
    if self.op.os_type is not None:
2174 d0834de3 Michael Hanselmann
      # OS verification
2175 d0834de3 Michael Hanselmann
      pnode = self.cfg.GetNodeInfo(
2176 d0834de3 Michael Hanselmann
        self.cfg.ExpandNodeName(instance.primary_node))
2177 d0834de3 Michael Hanselmann
      if pnode is None:
2178 3ecf6786 Iustin Pop
        raise errors.OpPrereqError("Primary node '%s' is unknown" %
2179 3ecf6786 Iustin Pop
                                   self.op.pnode)
2180 00fe9e38 Guido Trotter
      os_obj = rpc.call_os_get(pnode.name, self.op.os_type)
2181 dfa96ded Guido Trotter
      if not os_obj:
2182 3ecf6786 Iustin Pop
        raise errors.OpPrereqError("OS '%s' not in supported OS list for"
2183 3ecf6786 Iustin Pop
                                   " primary node"  % self.op.os_type)
2184 d0834de3 Michael Hanselmann
2185 fe7b0351 Michael Hanselmann
    self.instance = instance
2186 fe7b0351 Michael Hanselmann
2187 fe7b0351 Michael Hanselmann
  def Exec(self, feedback_fn):
2188 fe7b0351 Michael Hanselmann
    """Reinstall the instance.
2189 fe7b0351 Michael Hanselmann

2190 fe7b0351 Michael Hanselmann
    """
2191 fe7b0351 Michael Hanselmann
    inst = self.instance
2192 fe7b0351 Michael Hanselmann
2193 d0834de3 Michael Hanselmann
    if self.op.os_type is not None:
2194 d0834de3 Michael Hanselmann
      feedback_fn("Changing OS to '%s'..." % self.op.os_type)
2195 d0834de3 Michael Hanselmann
      inst.os = self.op.os_type
2196 d0834de3 Michael Hanselmann
      self.cfg.AddInstance(inst)
2197 d0834de3 Michael Hanselmann
2198 fe7b0351 Michael Hanselmann
    _StartInstanceDisks(self.cfg, inst, None)
2199 fe7b0351 Michael Hanselmann
    try:
2200 fe7b0351 Michael Hanselmann
      feedback_fn("Running the instance OS create scripts...")
2201 fe7b0351 Michael Hanselmann
      if not rpc.call_instance_os_add(inst.primary_node, inst, "sda", "sdb"):
2202 f4bc1f2c Michael Hanselmann
        raise errors.OpExecError("Could not install OS for instance %s"
2203 f4bc1f2c Michael Hanselmann
                                 " on node %s" %
2204 3ecf6786 Iustin Pop
                                 (inst.name, inst.primary_node))
2205 fe7b0351 Michael Hanselmann
    finally:
2206 fe7b0351 Michael Hanselmann
      _ShutdownInstanceDisks(inst, self.cfg)
2207 fe7b0351 Michael Hanselmann
2208 fe7b0351 Michael Hanselmann
2209 decd5f45 Iustin Pop
class LURenameInstance(LogicalUnit):
2210 decd5f45 Iustin Pop
  """Rename an instance.
2211 decd5f45 Iustin Pop

2212 decd5f45 Iustin Pop
  """
2213 decd5f45 Iustin Pop
  HPATH = "instance-rename"
2214 decd5f45 Iustin Pop
  HTYPE = constants.HTYPE_INSTANCE
2215 decd5f45 Iustin Pop
  _OP_REQP = ["instance_name", "new_name"]
2216 decd5f45 Iustin Pop
2217 decd5f45 Iustin Pop
  def BuildHooksEnv(self):
2218 decd5f45 Iustin Pop
    """Build hooks env.
2219 decd5f45 Iustin Pop

2220 decd5f45 Iustin Pop
    This runs on master, primary and secondary nodes of the instance.
2221 decd5f45 Iustin Pop

2222 decd5f45 Iustin Pop
    """
2223 decd5f45 Iustin Pop
    env = _BuildInstanceHookEnvByObject(self.instance)
2224 decd5f45 Iustin Pop
    env["INSTANCE_NEW_NAME"] = self.op.new_name
2225 decd5f45 Iustin Pop
    nl = ([self.sstore.GetMasterNode(), self.instance.primary_node] +
2226 decd5f45 Iustin Pop
          list(self.instance.secondary_nodes))
2227 decd5f45 Iustin Pop
    return env, nl, nl
2228 decd5f45 Iustin Pop
2229 decd5f45 Iustin Pop
  def CheckPrereq(self):
2230 decd5f45 Iustin Pop
    """Check prerequisites.
2231 decd5f45 Iustin Pop

2232 decd5f45 Iustin Pop
    This checks that the instance is in the cluster and is not running.
2233 decd5f45 Iustin Pop

2234 decd5f45 Iustin Pop
    """
2235 decd5f45 Iustin Pop
    instance = self.cfg.GetInstanceInfo(
2236 decd5f45 Iustin Pop
      self.cfg.ExpandInstanceName(self.op.instance_name))
2237 decd5f45 Iustin Pop
    if instance is None:
2238 decd5f45 Iustin Pop
      raise errors.OpPrereqError("Instance '%s' not known" %
2239 decd5f45 Iustin Pop
                                 self.op.instance_name)
2240 decd5f45 Iustin Pop
    if instance.status != "down":
2241 decd5f45 Iustin Pop
      raise errors.OpPrereqError("Instance '%s' is marked to be up" %
2242 decd5f45 Iustin Pop
                                 self.op.instance_name)
2243 decd5f45 Iustin Pop
    remote_info = rpc.call_instance_info(instance.primary_node, instance.name)
2244 decd5f45 Iustin Pop
    if remote_info:
2245 decd5f45 Iustin Pop
      raise errors.OpPrereqError("Instance '%s' is running on the node %s" %
2246 decd5f45 Iustin Pop
                                 (self.op.instance_name,
2247 decd5f45 Iustin Pop
                                  instance.primary_node))
2248 decd5f45 Iustin Pop
    self.instance = instance
2249 decd5f45 Iustin Pop
2250 decd5f45 Iustin Pop
    # new name verification
2251 89e1fc26 Iustin Pop
    name_info = utils.HostInfo(self.op.new_name)
2252 decd5f45 Iustin Pop
2253 89e1fc26 Iustin Pop
    self.op.new_name = new_name = name_info.name
2254 7bde3275 Guido Trotter
    instance_list = self.cfg.GetInstanceList()
2255 7bde3275 Guido Trotter
    if new_name in instance_list:
2256 7bde3275 Guido Trotter
      raise errors.OpPrereqError("Instance '%s' is already in the cluster" %
2257 c09f363f Manuel Franceschini
                                 new_name)
2258 7bde3275 Guido Trotter
2259 decd5f45 Iustin Pop
    if not getattr(self.op, "ignore_ip", False):
2260 937f983d Guido Trotter
      if utils.TcpPing(name_info.ip, constants.DEFAULT_NODED_PORT):
2261 decd5f45 Iustin Pop
        raise errors.OpPrereqError("IP %s of instance %s already in use" %
2262 89e1fc26 Iustin Pop
                                   (name_info.ip, new_name))
2263 decd5f45 Iustin Pop
2264 decd5f45 Iustin Pop
2265 decd5f45 Iustin Pop
  def Exec(self, feedback_fn):
2266 decd5f45 Iustin Pop
    """Reinstall the instance.
2267 decd5f45 Iustin Pop

2268 decd5f45 Iustin Pop
    """
2269 decd5f45 Iustin Pop
    inst = self.instance
2270 decd5f45 Iustin Pop
    old_name = inst.name
2271 decd5f45 Iustin Pop
2272 b23c4333 Manuel Franceschini
    if inst.disk_template == constants.DT_FILE:
2273 b23c4333 Manuel Franceschini
      old_file_storage_dir = os.path.dirname(inst.disks[0].logical_id[1])
2274 b23c4333 Manuel Franceschini
2275 decd5f45 Iustin Pop
    self.cfg.RenameInstance(inst.name, self.op.new_name)
2276 decd5f45 Iustin Pop
2277 decd5f45 Iustin Pop
    # re-read the instance from the configuration after rename
2278 decd5f45 Iustin Pop
    inst = self.cfg.GetInstanceInfo(self.op.new_name)
2279 decd5f45 Iustin Pop
2280 b23c4333 Manuel Franceschini
    if inst.disk_template == constants.DT_FILE:
2281 b23c4333 Manuel Franceschini
      new_file_storage_dir = os.path.dirname(inst.disks[0].logical_id[1])
2282 b23c4333 Manuel Franceschini
      result = rpc.call_file_storage_dir_rename(inst.primary_node,
2283 b23c4333 Manuel Franceschini
                                                old_file_storage_dir,
2284 b23c4333 Manuel Franceschini
                                                new_file_storage_dir)
2285 b23c4333 Manuel Franceschini
2286 b23c4333 Manuel Franceschini
      if not result:
2287 b23c4333 Manuel Franceschini
        raise errors.OpExecError("Could not connect to node '%s' to rename"
2288 b23c4333 Manuel Franceschini
                                 " directory '%s' to '%s' (but the instance"
2289 b23c4333 Manuel Franceschini
                                 " has been renamed in Ganeti)" % (
2290 b23c4333 Manuel Franceschini
                                 inst.primary_node, old_file_storage_dir,
2291 b23c4333 Manuel Franceschini
                                 new_file_storage_dir))
2292 b23c4333 Manuel Franceschini
2293 b23c4333 Manuel Franceschini
      if not result[0]:
2294 b23c4333 Manuel Franceschini
        raise errors.OpExecError("Could not rename directory '%s' to '%s'"
2295 b23c4333 Manuel Franceschini
                                 " (but the instance has been renamed in"
2296 b23c4333 Manuel Franceschini
                                 " Ganeti)" % (old_file_storage_dir,
2297 b23c4333 Manuel Franceschini
                                               new_file_storage_dir))
2298 b23c4333 Manuel Franceschini
2299 decd5f45 Iustin Pop
    _StartInstanceDisks(self.cfg, inst, None)
2300 decd5f45 Iustin Pop
    try:
2301 decd5f45 Iustin Pop
      if not rpc.call_instance_run_rename(inst.primary_node, inst, old_name,
2302 decd5f45 Iustin Pop
                                          "sda", "sdb"):
2303 f4bc1f2c Michael Hanselmann
        msg = ("Could run OS rename script for instance %s on node %s (but the"
2304 f4bc1f2c Michael Hanselmann
               " instance has been renamed in Ganeti)" %
2305 decd5f45 Iustin Pop
               (inst.name, inst.primary_node))
2306 decd5f45 Iustin Pop
        logger.Error(msg)
2307 decd5f45 Iustin Pop
    finally:
2308 decd5f45 Iustin Pop
      _ShutdownInstanceDisks(inst, self.cfg)
2309 decd5f45 Iustin Pop
2310 decd5f45 Iustin Pop
2311 a8083063 Iustin Pop
class LURemoveInstance(LogicalUnit):
2312 a8083063 Iustin Pop
  """Remove an instance.
2313 a8083063 Iustin Pop

2314 a8083063 Iustin Pop
  """
2315 a8083063 Iustin Pop
  HPATH = "instance-remove"
2316 a8083063 Iustin Pop
  HTYPE = constants.HTYPE_INSTANCE
2317 5c54b832 Iustin Pop
  _OP_REQP = ["instance_name", "ignore_failures"]
2318 a8083063 Iustin Pop
2319 a8083063 Iustin Pop
  def BuildHooksEnv(self):
2320 a8083063 Iustin Pop
    """Build hooks env.
2321 a8083063 Iustin Pop

2322 a8083063 Iustin Pop
    This runs on master, primary and secondary nodes of the instance.
2323 a8083063 Iustin Pop

2324 a8083063 Iustin Pop
    """
2325 396e1b78 Michael Hanselmann
    env = _BuildInstanceHookEnvByObject(self.instance)
2326 1d67656e Iustin Pop
    nl = [self.sstore.GetMasterNode()]
2327 a8083063 Iustin Pop
    return env, nl, nl
2328 a8083063 Iustin Pop
2329 a8083063 Iustin Pop
  def CheckPrereq(self):
2330 a8083063 Iustin Pop
    """Check prerequisites.
2331 a8083063 Iustin Pop

2332 a8083063 Iustin Pop
    This checks that the instance is in the cluster.
2333 a8083063 Iustin Pop

2334 a8083063 Iustin Pop
    """
2335 a8083063 Iustin Pop
    instance = self.cfg.GetInstanceInfo(
2336 a8083063 Iustin Pop
      self.cfg.ExpandInstanceName(self.op.instance_name))
2337 a8083063 Iustin Pop
    if instance is None:
2338 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("Instance '%s' not known" %
2339 3ecf6786 Iustin Pop
                                 self.op.instance_name)
2340 a8083063 Iustin Pop
    self.instance = instance
2341 a8083063 Iustin Pop
2342 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
2343 a8083063 Iustin Pop
    """Remove the instance.
2344 a8083063 Iustin Pop

2345 a8083063 Iustin Pop
    """
2346 a8083063 Iustin Pop
    instance = self.instance
2347 a8083063 Iustin Pop
    logger.Info("shutting down instance %s on node %s" %
2348 a8083063 Iustin Pop
                (instance.name, instance.primary_node))
2349 a8083063 Iustin Pop
2350 a8083063 Iustin Pop
    if not rpc.call_instance_shutdown(instance.primary_node, instance):
2351 1d67656e Iustin Pop
      if self.op.ignore_failures:
2352 1d67656e Iustin Pop
        feedback_fn("Warning: can't shutdown instance")
2353 1d67656e Iustin Pop
      else:
2354 1d67656e Iustin Pop
        raise errors.OpExecError("Could not shutdown instance %s on node %s" %
2355 1d67656e Iustin Pop
                                 (instance.name, instance.primary_node))
2356 a8083063 Iustin Pop
2357 a8083063 Iustin Pop
    logger.Info("removing block devices for instance %s" % instance.name)
2358 a8083063 Iustin Pop
2359 1d67656e Iustin Pop
    if not _RemoveDisks(instance, self.cfg):
2360 1d67656e Iustin Pop
      if self.op.ignore_failures:
2361 1d67656e Iustin Pop
        feedback_fn("Warning: can't remove instance's disks")
2362 1d67656e Iustin Pop
      else:
2363 1d67656e Iustin Pop
        raise errors.OpExecError("Can't remove instance's disks")
2364 a8083063 Iustin Pop
2365 a8083063 Iustin Pop
    logger.Info("removing instance %s out of cluster config" % instance.name)
2366 a8083063 Iustin Pop
2367 a8083063 Iustin Pop
    self.cfg.RemoveInstance(instance.name)
2368 a8083063 Iustin Pop
2369 a8083063 Iustin Pop
2370 a8083063 Iustin Pop
class LUQueryInstances(NoHooksLU):
2371 a8083063 Iustin Pop
  """Logical unit for querying instances.
2372 a8083063 Iustin Pop

2373 a8083063 Iustin Pop
  """
2374 069dcc86 Iustin Pop
  _OP_REQP = ["output_fields", "names"]
2375 a8083063 Iustin Pop
2376 a8083063 Iustin Pop
  def CheckPrereq(self):
2377 a8083063 Iustin Pop
    """Check prerequisites.
2378 a8083063 Iustin Pop

2379 a8083063 Iustin Pop
    This checks that the fields required are valid output fields.
2380 a8083063 Iustin Pop

2381 a8083063 Iustin Pop
    """
2382 d8052456 Iustin Pop
    self.dynamic_fields = frozenset(["oper_state", "oper_ram", "status"])
2383 dcb93971 Michael Hanselmann
    _CheckOutputFields(static=["name", "os", "pnode", "snodes",
2384 dcb93971 Michael Hanselmann
                               "admin_state", "admin_ram",
2385 644eeef9 Iustin Pop
                               "disk_template", "ip", "mac", "bridge",
2386 130a6a6f Iustin Pop
                               "sda_size", "sdb_size", "vcpus", "tags"],
2387 dcb93971 Michael Hanselmann
                       dynamic=self.dynamic_fields,
2388 dcb93971 Michael Hanselmann
                       selected=self.op.output_fields)
2389 a8083063 Iustin Pop
2390 069dcc86 Iustin Pop
    self.wanted = _GetWantedInstances(self, self.op.names)
2391 069dcc86 Iustin Pop
2392 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
2393 a8083063 Iustin Pop
    """Computes the list of nodes and their attributes.
2394 a8083063 Iustin Pop

2395 a8083063 Iustin Pop
    """
2396 069dcc86 Iustin Pop
    instance_names = self.wanted
2397 a8083063 Iustin Pop
    instance_list = [self.cfg.GetInstanceInfo(iname) for iname
2398 a8083063 Iustin Pop
                     in instance_names]
2399 a8083063 Iustin Pop
2400 a8083063 Iustin Pop
    # begin data gathering
2401 a8083063 Iustin Pop
2402 a8083063 Iustin Pop
    nodes = frozenset([inst.primary_node for inst in instance_list])
2403 a8083063 Iustin Pop
2404 a8083063 Iustin Pop
    bad_nodes = []
2405 a8083063 Iustin Pop
    if self.dynamic_fields.intersection(self.op.output_fields):
2406 a8083063 Iustin Pop
      live_data = {}
2407 a8083063 Iustin Pop
      node_data = rpc.call_all_instances_info(nodes)
2408 a8083063 Iustin Pop
      for name in nodes:
2409 a8083063 Iustin Pop
        result = node_data[name]
2410 a8083063 Iustin Pop
        if result:
2411 a8083063 Iustin Pop
          live_data.update(result)
2412 a8083063 Iustin Pop
        elif result == False:
2413 a8083063 Iustin Pop
          bad_nodes.append(name)
2414 a8083063 Iustin Pop
        # else no instance is alive
2415 a8083063 Iustin Pop
    else:
2416 a8083063 Iustin Pop
      live_data = dict([(name, {}) for name in instance_names])
2417 a8083063 Iustin Pop
2418 a8083063 Iustin Pop
    # end data gathering
2419 a8083063 Iustin Pop
2420 a8083063 Iustin Pop
    output = []
2421 a8083063 Iustin Pop
    for instance in instance_list:
2422 a8083063 Iustin Pop
      iout = []
2423 a8083063 Iustin Pop
      for field in self.op.output_fields:
2424 a8083063 Iustin Pop
        if field == "name":
2425 a8083063 Iustin Pop
          val = instance.name
2426 a8083063 Iustin Pop
        elif field == "os":
2427 a8083063 Iustin Pop
          val = instance.os
2428 a8083063 Iustin Pop
        elif field == "pnode":
2429 a8083063 Iustin Pop
          val = instance.primary_node
2430 a8083063 Iustin Pop
        elif field == "snodes":
2431 8a23d2d3 Iustin Pop
          val = list(instance.secondary_nodes)
2432 a8083063 Iustin Pop
        elif field == "admin_state":
2433 8a23d2d3 Iustin Pop
          val = (instance.status != "down")
2434 a8083063 Iustin Pop
        elif field == "oper_state":
2435 a8083063 Iustin Pop
          if instance.primary_node in bad_nodes:
2436 8a23d2d3 Iustin Pop
            val = None
2437 a8083063 Iustin Pop
          else:
2438 8a23d2d3 Iustin Pop
            val = bool(live_data.get(instance.name))
2439 d8052456 Iustin Pop
        elif field == "status":
2440 d8052456 Iustin Pop
          if instance.primary_node in bad_nodes:
2441 d8052456 Iustin Pop
            val = "ERROR_nodedown"
2442 d8052456 Iustin Pop
          else:
2443 d8052456 Iustin Pop
            running = bool(live_data.get(instance.name))
2444 d8052456 Iustin Pop
            if running:
2445 d8052456 Iustin Pop
              if instance.status != "down":
2446 d8052456 Iustin Pop
                val = "running"
2447 d8052456 Iustin Pop
              else:
2448 d8052456 Iustin Pop
                val = "ERROR_up"
2449 d8052456 Iustin Pop
            else:
2450 d8052456 Iustin Pop
              if instance.status != "down":
2451 d8052456 Iustin Pop
                val = "ERROR_down"
2452 d8052456 Iustin Pop
              else:
2453 d8052456 Iustin Pop
                val = "ADMIN_down"
2454 a8083063 Iustin Pop
        elif field == "admin_ram":
2455 a8083063 Iustin Pop
          val = instance.memory
2456 a8083063 Iustin Pop
        elif field == "oper_ram":
2457 a8083063 Iustin Pop
          if instance.primary_node in bad_nodes:
2458 8a23d2d3 Iustin Pop
            val = None
2459 a8083063 Iustin Pop
          elif instance.name in live_data:
2460 a8083063 Iustin Pop
            val = live_data[instance.name].get("memory", "?")
2461 a8083063 Iustin Pop
          else:
2462 a8083063 Iustin Pop
            val = "-"
2463 a8083063 Iustin Pop
        elif field == "disk_template":
2464 a8083063 Iustin Pop
          val = instance.disk_template
2465 a8083063 Iustin Pop
        elif field == "ip":
2466 a8083063 Iustin Pop
          val = instance.nics[0].ip
2467 a8083063 Iustin Pop
        elif field == "bridge":
2468 a8083063 Iustin Pop
          val = instance.nics[0].bridge
2469 a8083063 Iustin Pop
        elif field == "mac":
2470 a8083063 Iustin Pop
          val = instance.nics[0].mac
2471 644eeef9 Iustin Pop
        elif field == "sda_size" or field == "sdb_size":
2472 644eeef9 Iustin Pop
          disk = instance.FindDisk(field[:3])
2473 644eeef9 Iustin Pop
          if disk is None:
2474 8a23d2d3 Iustin Pop
            val = None
2475 644eeef9 Iustin Pop
          else:
2476 644eeef9 Iustin Pop
            val = disk.size
2477 d6d415e8 Iustin Pop
        elif field == "vcpus":
2478 d6d415e8 Iustin Pop
          val = instance.vcpus
2479 130a6a6f Iustin Pop
        elif field == "tags":
2480 130a6a6f Iustin Pop
          val = list(instance.GetTags())
2481 a8083063 Iustin Pop
        else:
2482 3ecf6786 Iustin Pop
          raise errors.ParameterError(field)
2483 a8083063 Iustin Pop
        iout.append(val)
2484 a8083063 Iustin Pop
      output.append(iout)
2485 a8083063 Iustin Pop
2486 a8083063 Iustin Pop
    return output
2487 a8083063 Iustin Pop
2488 a8083063 Iustin Pop
2489 a8083063 Iustin Pop
class LUFailoverInstance(LogicalUnit):
2490 a8083063 Iustin Pop
  """Failover an instance.
2491 a8083063 Iustin Pop

2492 a8083063 Iustin Pop
  """
2493 a8083063 Iustin Pop
  HPATH = "instance-failover"
2494 a8083063 Iustin Pop
  HTYPE = constants.HTYPE_INSTANCE
2495 a8083063 Iustin Pop
  _OP_REQP = ["instance_name", "ignore_consistency"]
2496 a8083063 Iustin Pop
2497 a8083063 Iustin Pop
  def BuildHooksEnv(self):
2498 a8083063 Iustin Pop
    """Build hooks env.
2499 a8083063 Iustin Pop

2500 a8083063 Iustin Pop
    This runs on master, primary and secondary nodes of the instance.
2501 a8083063 Iustin Pop

2502 a8083063 Iustin Pop
    """
2503 a8083063 Iustin Pop
    env = {
2504 a8083063 Iustin Pop
      "IGNORE_CONSISTENCY": self.op.ignore_consistency,
2505 a8083063 Iustin Pop
      }
2506 396e1b78 Michael Hanselmann
    env.update(_BuildInstanceHookEnvByObject(self.instance))
2507 880478f8 Iustin Pop
    nl = [self.sstore.GetMasterNode()] + list(self.instance.secondary_nodes)
2508 a8083063 Iustin Pop
    return env, nl, nl
2509 a8083063 Iustin Pop
2510 a8083063 Iustin Pop
  def CheckPrereq(self):
2511 a8083063 Iustin Pop
    """Check prerequisites.
2512 a8083063 Iustin Pop

2513 a8083063 Iustin Pop
    This checks that the instance is in the cluster.
2514 a8083063 Iustin Pop

2515 a8083063 Iustin Pop
    """
2516 a8083063 Iustin Pop
    instance = self.cfg.GetInstanceInfo(
2517 a8083063 Iustin Pop
      self.cfg.ExpandInstanceName(self.op.instance_name))
2518 a8083063 Iustin Pop
    if instance is None:
2519 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("Instance '%s' not known" %
2520 3ecf6786 Iustin Pop
                                 self.op.instance_name)
2521 a8083063 Iustin Pop
2522 a1f445d3 Iustin Pop
    if instance.disk_template not in constants.DTS_NET_MIRROR:
2523 2a710df1 Michael Hanselmann
      raise errors.OpPrereqError("Instance's disk layout is not"
2524 a1f445d3 Iustin Pop
                                 " network mirrored, cannot failover.")
2525 2a710df1 Michael Hanselmann
2526 2a710df1 Michael Hanselmann
    secondary_nodes = instance.secondary_nodes
2527 2a710df1 Michael Hanselmann
    if not secondary_nodes:
2528 2a710df1 Michael Hanselmann
      raise errors.ProgrammerError("no secondary node but using "
2529 abdf0113 Iustin Pop
                                   "a mirrored disk template")
2530 2a710df1 Michael Hanselmann
2531 2a710df1 Michael Hanselmann
    target_node = secondary_nodes[0]
2532 d4f16fd9 Iustin Pop
    # check memory requirements on the secondary node
2533 d4f16fd9 Iustin Pop
    _CheckNodeFreeMemory(self.cfg, target_node, "failing over instance %s" %
2534 d4f16fd9 Iustin Pop
                         instance.name, instance.memory)
2535 3a7c308e Guido Trotter
2536 a8083063 Iustin Pop
    # check bridge existance
2537 a8083063 Iustin Pop
    brlist = [nic.bridge for nic in instance.nics]
2538 50ff9a7a Iustin Pop
    if not rpc.call_bridges_exist(target_node, brlist):
2539 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("One or more target bridges %s does not"
2540 3ecf6786 Iustin Pop
                                 " exist on destination node '%s'" %
2541 50ff9a7a Iustin Pop
                                 (brlist, target_node))
2542 a8083063 Iustin Pop
2543 a8083063 Iustin Pop
    self.instance = instance
2544 a8083063 Iustin Pop
2545 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
2546 a8083063 Iustin Pop
    """Failover an instance.
2547 a8083063 Iustin Pop

2548 a8083063 Iustin Pop
    The failover is done by shutting it down on its present node and
2549 a8083063 Iustin Pop
    starting it on the secondary.
2550 a8083063 Iustin Pop

2551 a8083063 Iustin Pop
    """
2552 a8083063 Iustin Pop
    instance = self.instance
2553 a8083063 Iustin Pop
2554 a8083063 Iustin Pop
    source_node = instance.primary_node
2555 a8083063 Iustin Pop
    target_node = instance.secondary_nodes[0]
2556 a8083063 Iustin Pop
2557 a8083063 Iustin Pop
    feedback_fn("* checking disk consistency between source and target")
2558 a8083063 Iustin Pop
    for dev in instance.disks:
2559 abdf0113 Iustin Pop
      # for drbd, these are drbd over lvm
2560 a8083063 Iustin Pop
      if not _CheckDiskConsistency(self.cfg, dev, target_node, False):
2561 a0aaa0d0 Guido Trotter
        if instance.status == "up" and not self.op.ignore_consistency:
2562 3ecf6786 Iustin Pop
          raise errors.OpExecError("Disk %s is degraded on target node,"
2563 3ecf6786 Iustin Pop
                                   " aborting failover." % dev.iv_name)
2564 a8083063 Iustin Pop
2565 a8083063 Iustin Pop
    feedback_fn("* shutting down instance on source node")
2566 a8083063 Iustin Pop
    logger.Info("Shutting down instance %s on node %s" %
2567 a8083063 Iustin Pop
                (instance.name, source_node))
2568 a8083063 Iustin Pop
2569 a8083063 Iustin Pop
    if not rpc.call_instance_shutdown(source_node, instance):
2570 24a40d57 Iustin Pop
      if self.op.ignore_consistency:
2571 24a40d57 Iustin Pop
        logger.Error("Could not shutdown instance %s on node %s. Proceeding"
2572 24a40d57 Iustin Pop
                     " anyway. Please make sure node %s is down"  %
2573 24a40d57 Iustin Pop
                     (instance.name, source_node, source_node))
2574 24a40d57 Iustin Pop
      else:
2575 24a40d57 Iustin Pop
        raise errors.OpExecError("Could not shutdown instance %s on node %s" %
2576 24a40d57 Iustin Pop
                                 (instance.name, source_node))
2577 a8083063 Iustin Pop
2578 a8083063 Iustin Pop
    feedback_fn("* deactivating the instance's disks on source node")
2579 a8083063 Iustin Pop
    if not _ShutdownInstanceDisks(instance, self.cfg, ignore_primary=True):
2580 3ecf6786 Iustin Pop
      raise errors.OpExecError("Can't shut down the instance's disks.")
2581 a8083063 Iustin Pop
2582 a8083063 Iustin Pop
    instance.primary_node = target_node
2583 a8083063 Iustin Pop
    # distribute new instance config to the other nodes
2584 b6102dab Guido Trotter
    self.cfg.Update(instance)
2585 a8083063 Iustin Pop
2586 12a0cfbe Guido Trotter
    # Only start the instance if it's marked as up
2587 12a0cfbe Guido Trotter
    if instance.status == "up":
2588 12a0cfbe Guido Trotter
      feedback_fn("* activating the instance's disks on target node")
2589 12a0cfbe Guido Trotter
      logger.Info("Starting instance %s on node %s" %
2590 12a0cfbe Guido Trotter
                  (instance.name, target_node))
2591 12a0cfbe Guido Trotter
2592 12a0cfbe Guido Trotter
      disks_ok, dummy = _AssembleInstanceDisks(instance, self.cfg,
2593 12a0cfbe Guido Trotter
                                               ignore_secondaries=True)
2594 12a0cfbe Guido Trotter
      if not disks_ok:
2595 12a0cfbe Guido Trotter
        _ShutdownInstanceDisks(instance, self.cfg)
2596 12a0cfbe Guido Trotter
        raise errors.OpExecError("Can't activate the instance's disks")
2597 a8083063 Iustin Pop
2598 12a0cfbe Guido Trotter
      feedback_fn("* starting the instance on the target node")
2599 12a0cfbe Guido Trotter
      if not rpc.call_instance_start(target_node, instance, None):
2600 12a0cfbe Guido Trotter
        _ShutdownInstanceDisks(instance, self.cfg)
2601 12a0cfbe Guido Trotter
        raise errors.OpExecError("Could not start instance %s on node %s." %
2602 12a0cfbe Guido Trotter
                                 (instance.name, target_node))
2603 a8083063 Iustin Pop
2604 a8083063 Iustin Pop
2605 3f78eef2 Iustin Pop
def _CreateBlockDevOnPrimary(cfg, node, instance, device, info):
2606 a8083063 Iustin Pop
  """Create a tree of block devices on the primary node.
2607 a8083063 Iustin Pop

2608 a8083063 Iustin Pop
  This always creates all devices.
2609 a8083063 Iustin Pop

2610 a8083063 Iustin Pop
  """
2611 a8083063 Iustin Pop
  if device.children:
2612 a8083063 Iustin Pop
    for child in device.children:
2613 3f78eef2 Iustin Pop
      if not _CreateBlockDevOnPrimary(cfg, node, instance, child, info):
2614 a8083063 Iustin Pop
        return False
2615 a8083063 Iustin Pop
2616 a8083063 Iustin Pop
  cfg.SetDiskID(device, node)
2617 3f78eef2 Iustin Pop
  new_id = rpc.call_blockdev_create(node, device, device.size,
2618 3f78eef2 Iustin Pop
                                    instance.name, True, info)
2619 a8083063 Iustin Pop
  if not new_id:
2620 a8083063 Iustin Pop
    return False
2621 a8083063 Iustin Pop
  if device.physical_id is None:
2622 a8083063 Iustin Pop
    device.physical_id = new_id
2623 a8083063 Iustin Pop
  return True
2624 a8083063 Iustin Pop
2625 a8083063 Iustin Pop
2626 3f78eef2 Iustin Pop
def _CreateBlockDevOnSecondary(cfg, node, instance, device, force, info):
2627 a8083063 Iustin Pop
  """Create a tree of block devices on a secondary node.
2628 a8083063 Iustin Pop

2629 a8083063 Iustin Pop
  If this device type has to be created on secondaries, create it and
2630 a8083063 Iustin Pop
  all its children.
2631 a8083063 Iustin Pop

2632 a8083063 Iustin Pop
  If not, just recurse to children keeping the same 'force' value.
2633 a8083063 Iustin Pop

2634 a8083063 Iustin Pop
  """
2635 a8083063 Iustin Pop
  if device.CreateOnSecondary():
2636 a8083063 Iustin Pop
    force = True
2637 a8083063 Iustin Pop
  if device.children:
2638 a8083063 Iustin Pop
    for child in device.children:
2639 3f78eef2 Iustin Pop
      if not _CreateBlockDevOnSecondary(cfg, node, instance,
2640 3f78eef2 Iustin Pop
                                        child, force, info):
2641 a8083063 Iustin Pop
        return False
2642 a8083063 Iustin Pop
2643 a8083063 Iustin Pop
  if not force:
2644 a8083063 Iustin Pop
    return True
2645 a8083063 Iustin Pop
  cfg.SetDiskID(device, node)
2646 3f78eef2 Iustin Pop
  new_id = rpc.call_blockdev_create(node, device, device.size,
2647 3f78eef2 Iustin Pop
                                    instance.name, False, info)
2648 a8083063 Iustin Pop
  if not new_id:
2649 a8083063 Iustin Pop
    return False
2650 a8083063 Iustin Pop
  if device.physical_id is None:
2651 a8083063 Iustin Pop
    device.physical_id = new_id
2652 a8083063 Iustin Pop
  return True
2653 a8083063 Iustin Pop
2654 a8083063 Iustin Pop
2655 923b1523 Iustin Pop
def _GenerateUniqueNames(cfg, exts):
2656 923b1523 Iustin Pop
  """Generate a suitable LV name.
2657 923b1523 Iustin Pop

2658 923b1523 Iustin Pop
  This will generate a logical volume name for the given instance.
2659 923b1523 Iustin Pop

2660 923b1523 Iustin Pop
  """
2661 923b1523 Iustin Pop
  results = []
2662 923b1523 Iustin Pop
  for val in exts:
2663 923b1523 Iustin Pop
    new_id = cfg.GenerateUniqueID()
2664 923b1523 Iustin Pop
    results.append("%s%s" % (new_id, val))
2665 923b1523 Iustin Pop
  return results
2666 923b1523 Iustin Pop
2667 923b1523 Iustin Pop
2668 a1f445d3 Iustin Pop
def _GenerateDRBD8Branch(cfg, primary, secondary, size, names, iv_name):
2669 a1f445d3 Iustin Pop
  """Generate a drbd8 device complete with its children.
2670 a1f445d3 Iustin Pop

2671 a1f445d3 Iustin Pop
  """
2672 a1f445d3 Iustin Pop
  port = cfg.AllocatePort()
2673 a1f445d3 Iustin Pop
  vgname = cfg.GetVGName()
2674 a1f445d3 Iustin Pop
  dev_data = objects.Disk(dev_type=constants.LD_LV, size=size,
2675 a1f445d3 Iustin Pop
                          logical_id=(vgname, names[0]))
2676 a1f445d3 Iustin Pop
  dev_meta = objects.Disk(dev_type=constants.LD_LV, size=128,
2677 a1f445d3 Iustin Pop
                          logical_id=(vgname, names[1]))
2678 a1f445d3 Iustin Pop
  drbd_dev = objects.Disk(dev_type=constants.LD_DRBD8, size=size,
2679 a1f445d3 Iustin Pop
                          logical_id = (primary, secondary, port),
2680 a1f445d3 Iustin Pop
                          children = [dev_data, dev_meta],
2681 a1f445d3 Iustin Pop
                          iv_name=iv_name)
2682 a1f445d3 Iustin Pop
  return drbd_dev
2683 a1f445d3 Iustin Pop
2684 7c0d6283 Michael Hanselmann
2685 923b1523 Iustin Pop
def _GenerateDiskTemplate(cfg, template_name,
2686 a8083063 Iustin Pop
                          instance_name, primary_node,
2687 0f1a06e3 Manuel Franceschini
                          secondary_nodes, disk_sz, swap_sz,
2688 0f1a06e3 Manuel Franceschini
                          file_storage_dir, file_driver):
2689 a8083063 Iustin Pop
  """Generate the entire disk layout for a given template type.
2690 a8083063 Iustin Pop

2691 a8083063 Iustin Pop
  """
2692 a8083063 Iustin Pop
  #TODO: compute space requirements
2693 a8083063 Iustin Pop
2694 923b1523 Iustin Pop
  vgname = cfg.GetVGName()
2695 3517d9b9 Manuel Franceschini
  if template_name == constants.DT_DISKLESS:
2696 a8083063 Iustin Pop
    disks = []
2697 3517d9b9 Manuel Franceschini
  elif template_name == constants.DT_PLAIN:
2698 a8083063 Iustin Pop
    if len(secondary_nodes) != 0:
2699 a8083063 Iustin Pop
      raise errors.ProgrammerError("Wrong template configuration")
2700 923b1523 Iustin Pop
2701 923b1523 Iustin Pop
    names = _GenerateUniqueNames(cfg, [".sda", ".sdb"])
2702 fe96220b Iustin Pop
    sda_dev = objects.Disk(dev_type=constants.LD_LV, size=disk_sz,
2703 923b1523 Iustin Pop
                           logical_id=(vgname, names[0]),
2704 a8083063 Iustin Pop
                           iv_name = "sda")
2705 fe96220b Iustin Pop
    sdb_dev = objects.Disk(dev_type=constants.LD_LV, size=swap_sz,
2706 923b1523 Iustin Pop
                           logical_id=(vgname, names[1]),
2707 a8083063 Iustin Pop
                           iv_name = "sdb")
2708 a8083063 Iustin Pop
    disks = [sda_dev, sdb_dev]
2709 a1f445d3 Iustin Pop
  elif template_name == constants.DT_DRBD8:
2710 a1f445d3 Iustin Pop
    if len(secondary_nodes) != 1:
2711 a1f445d3 Iustin Pop
      raise errors.ProgrammerError("Wrong template configuration")
2712 a1f445d3 Iustin Pop
    remote_node = secondary_nodes[0]
2713 a1f445d3 Iustin Pop
    names = _GenerateUniqueNames(cfg, [".sda_data", ".sda_meta",
2714 a1f445d3 Iustin Pop
                                       ".sdb_data", ".sdb_meta"])
2715 a1f445d3 Iustin Pop
    drbd_sda_dev = _GenerateDRBD8Branch(cfg, primary_node, remote_node,
2716 a1f445d3 Iustin Pop
                                         disk_sz, names[0:2], "sda")
2717 a1f445d3 Iustin Pop
    drbd_sdb_dev = _GenerateDRBD8Branch(cfg, primary_node, remote_node,
2718 a1f445d3 Iustin Pop
                                         swap_sz, names[2:4], "sdb")
2719 a1f445d3 Iustin Pop
    disks = [drbd_sda_dev, drbd_sdb_dev]
2720 0f1a06e3 Manuel Franceschini
  elif template_name == constants.DT_FILE:
2721 0f1a06e3 Manuel Franceschini
    if len(secondary_nodes) != 0:
2722 0f1a06e3 Manuel Franceschini
      raise errors.ProgrammerError("Wrong template configuration")
2723 0f1a06e3 Manuel Franceschini
2724 0f1a06e3 Manuel Franceschini
    file_sda_dev = objects.Disk(dev_type=constants.LD_FILE, size=disk_sz,
2725 0f1a06e3 Manuel Franceschini
                                iv_name="sda", logical_id=(file_driver,
2726 0f1a06e3 Manuel Franceschini
                                "%s/sda" % file_storage_dir))
2727 0f1a06e3 Manuel Franceschini
    file_sdb_dev = objects.Disk(dev_type=constants.LD_FILE, size=swap_sz,
2728 0f1a06e3 Manuel Franceschini
                                iv_name="sdb", logical_id=(file_driver,
2729 0f1a06e3 Manuel Franceschini
                                "%s/sdb" % file_storage_dir))
2730 0f1a06e3 Manuel Franceschini
    disks = [file_sda_dev, file_sdb_dev]
2731 a8083063 Iustin Pop
  else:
2732 a8083063 Iustin Pop
    raise errors.ProgrammerError("Invalid disk template '%s'" % template_name)
2733 a8083063 Iustin Pop
  return disks
2734 a8083063 Iustin Pop
2735 a8083063 Iustin Pop
2736 a0c3fea1 Michael Hanselmann
def _GetInstanceInfoText(instance):
2737 3ecf6786 Iustin Pop
  """Compute that text that should be added to the disk's metadata.
2738 3ecf6786 Iustin Pop

2739 3ecf6786 Iustin Pop
  """
2740 a0c3fea1 Michael Hanselmann
  return "originstname+%s" % instance.name
2741 a0c3fea1 Michael Hanselmann
2742 a0c3fea1 Michael Hanselmann
2743 a8083063 Iustin Pop
def _CreateDisks(cfg, instance):
2744 a8083063 Iustin Pop
  """Create all disks for an instance.
2745 a8083063 Iustin Pop

2746 a8083063 Iustin Pop
  This abstracts away some work from AddInstance.
2747 a8083063 Iustin Pop

2748 a8083063 Iustin Pop
  Args:
2749 a8083063 Iustin Pop
    instance: the instance object
2750 a8083063 Iustin Pop

2751 a8083063 Iustin Pop
  Returns:
2752 a8083063 Iustin Pop
    True or False showing the success of the creation process
2753 a8083063 Iustin Pop

2754 a8083063 Iustin Pop
  """
2755 a0c3fea1 Michael Hanselmann
  info = _GetInstanceInfoText(instance)
2756 a0c3fea1 Michael Hanselmann
2757 0f1a06e3 Manuel Franceschini
  if instance.disk_template == constants.DT_FILE:
2758 0f1a06e3 Manuel Franceschini
    file_storage_dir = os.path.dirname(instance.disks[0].logical_id[1])
2759 0f1a06e3 Manuel Franceschini
    result = rpc.call_file_storage_dir_create(instance.primary_node,
2760 0f1a06e3 Manuel Franceschini
                                              file_storage_dir)
2761 0f1a06e3 Manuel Franceschini
2762 0f1a06e3 Manuel Franceschini
    if not result:
2763 b62ddbe5 Guido Trotter
      logger.Error("Could not connect to node '%s'" % instance.primary_node)
2764 0f1a06e3 Manuel Franceschini
      return False
2765 0f1a06e3 Manuel Franceschini
2766 0f1a06e3 Manuel Franceschini
    if not result[0]:
2767 0f1a06e3 Manuel Franceschini
      logger.Error("failed to create directory '%s'" % file_storage_dir)
2768 0f1a06e3 Manuel Franceschini
      return False
2769 0f1a06e3 Manuel Franceschini
2770 a8083063 Iustin Pop
  for device in instance.disks:
2771 a8083063 Iustin Pop
    logger.Info("creating volume %s for instance %s" %
2772 1c6e3627 Manuel Franceschini
                (device.iv_name, instance.name))
2773 a8083063 Iustin Pop
    #HARDCODE
2774 a8083063 Iustin Pop
    for secondary_node in instance.secondary_nodes:
2775 3f78eef2 Iustin Pop
      if not _CreateBlockDevOnSecondary(cfg, secondary_node, instance,
2776 3f78eef2 Iustin Pop
                                        device, False, info):
2777 a8083063 Iustin Pop
        logger.Error("failed to create volume %s (%s) on secondary node %s!" %
2778 a8083063 Iustin Pop
                     (device.iv_name, device, secondary_node))
2779 a8083063 Iustin Pop
        return False
2780 a8083063 Iustin Pop
    #HARDCODE
2781 3f78eef2 Iustin Pop
    if not _CreateBlockDevOnPrimary(cfg, instance.primary_node,
2782 3f78eef2 Iustin Pop
                                    instance, device, info):
2783 a8083063 Iustin Pop
      logger.Error("failed to create volume %s on primary!" %
2784 a8083063 Iustin Pop
                   device.iv_name)
2785 a8083063 Iustin Pop
      return False
2786 1c6e3627 Manuel Franceschini
2787 a8083063 Iustin Pop
  return True
2788 a8083063 Iustin Pop
2789 a8083063 Iustin Pop
2790 a8083063 Iustin Pop
def _RemoveDisks(instance, cfg):
2791 a8083063 Iustin Pop
  """Remove all disks for an instance.
2792 a8083063 Iustin Pop

2793 a8083063 Iustin Pop
  This abstracts away some work from `AddInstance()` and
2794 a8083063 Iustin Pop
  `RemoveInstance()`. Note that in case some of the devices couldn't
2795 1d67656e Iustin Pop
  be removed, the removal will continue with the other ones (compare
2796 a8083063 Iustin Pop
  with `_CreateDisks()`).
2797 a8083063 Iustin Pop

2798 a8083063 Iustin Pop
  Args:
2799 a8083063 Iustin Pop
    instance: the instance object
2800 a8083063 Iustin Pop

2801 a8083063 Iustin Pop
  Returns:
2802 a8083063 Iustin Pop
    True or False showing the success of the removal proces
2803 a8083063 Iustin Pop

2804 a8083063 Iustin Pop
  """
2805 a8083063 Iustin Pop
  logger.Info("removing block devices for instance %s" % instance.name)
2806 a8083063 Iustin Pop
2807 a8083063 Iustin Pop
  result = True
2808 a8083063 Iustin Pop
  for device in instance.disks:
2809 a8083063 Iustin Pop
    for node, disk in device.ComputeNodeTree(instance.primary_node):
2810 a8083063 Iustin Pop
      cfg.SetDiskID(disk, node)
2811 a8083063 Iustin Pop
      if not rpc.call_blockdev_remove(node, disk):
2812 a8083063 Iustin Pop
        logger.Error("could not remove block device %s on node %s,"
2813 a8083063 Iustin Pop
                     " continuing anyway" %
2814 a8083063 Iustin Pop
                     (device.iv_name, node))
2815 a8083063 Iustin Pop
        result = False
2816 0f1a06e3 Manuel Franceschini
2817 0f1a06e3 Manuel Franceschini
  if instance.disk_template == constants.DT_FILE:
2818 0f1a06e3 Manuel Franceschini
    file_storage_dir = os.path.dirname(instance.disks[0].logical_id[1])
2819 0f1a06e3 Manuel Franceschini
    if not rpc.call_file_storage_dir_remove(instance.primary_node,
2820 0f1a06e3 Manuel Franceschini
                                            file_storage_dir):
2821 0f1a06e3 Manuel Franceschini
      logger.Error("could not remove directory '%s'" % file_storage_dir)
2822 0f1a06e3 Manuel Franceschini
      result = False
2823 0f1a06e3 Manuel Franceschini
2824 a8083063 Iustin Pop
  return result
2825 a8083063 Iustin Pop
2826 a8083063 Iustin Pop
2827 e2fe6369 Iustin Pop
def _ComputeDiskSize(disk_template, disk_size, swap_size):
2828 e2fe6369 Iustin Pop
  """Compute disk size requirements in the volume group
2829 e2fe6369 Iustin Pop

2830 e2fe6369 Iustin Pop
  This is currently hard-coded for the two-drive layout.
2831 e2fe6369 Iustin Pop

2832 e2fe6369 Iustin Pop
  """
2833 e2fe6369 Iustin Pop
  # Required free disk space as a function of disk and swap space
2834 e2fe6369 Iustin Pop
  req_size_dict = {
2835 e2fe6369 Iustin Pop
    constants.DT_DISKLESS: None,
2836 e2fe6369 Iustin Pop
    constants.DT_PLAIN: disk_size + swap_size,
2837 e2fe6369 Iustin Pop
    # 256 MB are added for drbd metadata, 128MB for each drbd device
2838 e2fe6369 Iustin Pop
    constants.DT_DRBD8: disk_size + swap_size + 256,
2839 e2fe6369 Iustin Pop
    constants.DT_FILE: None,
2840 e2fe6369 Iustin Pop
  }
2841 e2fe6369 Iustin Pop
2842 e2fe6369 Iustin Pop
  if disk_template not in req_size_dict:
2843 e2fe6369 Iustin Pop
    raise errors.ProgrammerError("Disk template '%s' size requirement"
2844 e2fe6369 Iustin Pop
                                 " is unknown" %  disk_template)
2845 e2fe6369 Iustin Pop
2846 e2fe6369 Iustin Pop
  return req_size_dict[disk_template]
2847 e2fe6369 Iustin Pop
2848 e2fe6369 Iustin Pop
2849 a8083063 Iustin Pop
class LUCreateInstance(LogicalUnit):
2850 a8083063 Iustin Pop
  """Create an instance.
2851 a8083063 Iustin Pop

2852 a8083063 Iustin Pop
  """
2853 a8083063 Iustin Pop
  HPATH = "instance-add"
2854 a8083063 Iustin Pop
  HTYPE = constants.HTYPE_INSTANCE
2855 538475ca Iustin Pop
  _OP_REQP = ["instance_name", "mem_size", "disk_size",
2856 a8083063 Iustin Pop
              "disk_template", "swap_size", "mode", "start", "vcpus",
2857 1862d460 Alexander Schreiber
              "wait_for_sync", "ip_check", "mac"]
2858 a8083063 Iustin Pop
2859 538475ca Iustin Pop
  def _RunAllocator(self):
2860 538475ca Iustin Pop
    """Run the allocator based on input opcode.
2861 538475ca Iustin Pop

2862 538475ca Iustin Pop
    """
2863 538475ca Iustin Pop
    disks = [{"size": self.op.disk_size, "mode": "w"},
2864 538475ca Iustin Pop
             {"size": self.op.swap_size, "mode": "w"}]
2865 538475ca Iustin Pop
    nics = [{"mac": self.op.mac, "ip": getattr(self.op, "ip", None),
2866 538475ca Iustin Pop
             "bridge": self.op.bridge}]
2867 d1c2dd75 Iustin Pop
    ial = IAllocator(self.cfg, self.sstore,
2868 29859cb7 Iustin Pop
                     mode=constants.IALLOCATOR_MODE_ALLOC,
2869 d1c2dd75 Iustin Pop
                     name=self.op.instance_name,
2870 d1c2dd75 Iustin Pop
                     disk_template=self.op.disk_template,
2871 d1c2dd75 Iustin Pop
                     tags=[],
2872 d1c2dd75 Iustin Pop
                     os=self.op.os_type,
2873 d1c2dd75 Iustin Pop
                     vcpus=self.op.vcpus,
2874 d1c2dd75 Iustin Pop
                     mem_size=self.op.mem_size,
2875 d1c2dd75 Iustin Pop
                     disks=disks,
2876 d1c2dd75 Iustin Pop
                     nics=nics,
2877 29859cb7 Iustin Pop
                     )
2878 d1c2dd75 Iustin Pop
2879 d1c2dd75 Iustin Pop
    ial.Run(self.op.iallocator)
2880 d1c2dd75 Iustin Pop
2881 d1c2dd75 Iustin Pop
    if not ial.success:
2882 538475ca Iustin Pop
      raise errors.OpPrereqError("Can't compute nodes using"
2883 538475ca Iustin Pop
                                 " iallocator '%s': %s" % (self.op.iallocator,
2884 d1c2dd75 Iustin Pop
                                                           ial.info))
2885 27579978 Iustin Pop
    if len(ial.nodes) != ial.required_nodes:
2886 538475ca Iustin Pop
      raise errors.OpPrereqError("iallocator '%s' returned invalid number"
2887 538475ca Iustin Pop
                                 " of nodes (%s), required %s" %
2888 27579978 Iustin Pop
                                 (len(ial.nodes), ial.required_nodes))
2889 d1c2dd75 Iustin Pop
    self.op.pnode = ial.nodes[0]
2890 538475ca Iustin Pop
    logger.ToStdout("Selected nodes for the instance: %s" %
2891 d1c2dd75 Iustin Pop
                    (", ".join(ial.nodes),))
2892 538475ca Iustin Pop
    logger.Info("Selected nodes for instance %s via iallocator %s: %s" %
2893 d1c2dd75 Iustin Pop
                (self.op.instance_name, self.op.iallocator, ial.nodes))
2894 27579978 Iustin Pop
    if ial.required_nodes == 2:
2895 d1c2dd75 Iustin Pop
      self.op.snode = ial.nodes[1]
2896 538475ca Iustin Pop
2897 a8083063 Iustin Pop
  def BuildHooksEnv(self):
2898 a8083063 Iustin Pop
    """Build hooks env.
2899 a8083063 Iustin Pop

2900 a8083063 Iustin Pop
    This runs on master, primary and secondary nodes of the instance.
2901 a8083063 Iustin Pop

2902 a8083063 Iustin Pop
    """
2903 a8083063 Iustin Pop
    env = {
2904 396e1b78 Michael Hanselmann
      "INSTANCE_DISK_TEMPLATE": self.op.disk_template,
2905 396e1b78 Michael Hanselmann
      "INSTANCE_DISK_SIZE": self.op.disk_size,
2906 396e1b78 Michael Hanselmann
      "INSTANCE_SWAP_SIZE": self.op.swap_size,
2907 a8083063 Iustin Pop
      "INSTANCE_ADD_MODE": self.op.mode,
2908 a8083063 Iustin Pop
      }
2909 a8083063 Iustin Pop
    if self.op.mode == constants.INSTANCE_IMPORT:
2910 396e1b78 Michael Hanselmann
      env["INSTANCE_SRC_NODE"] = self.op.src_node
2911 396e1b78 Michael Hanselmann
      env["INSTANCE_SRC_PATH"] = self.op.src_path
2912 396e1b78 Michael Hanselmann
      env["INSTANCE_SRC_IMAGE"] = self.src_image
2913 396e1b78 Michael Hanselmann
2914 396e1b78 Michael Hanselmann
    env.update(_BuildInstanceHookEnv(name=self.op.instance_name,
2915 396e1b78 Michael Hanselmann
      primary_node=self.op.pnode,
2916 396e1b78 Michael Hanselmann
      secondary_nodes=self.secondaries,
2917 396e1b78 Michael Hanselmann
      status=self.instance_status,
2918 ecb215b5 Michael Hanselmann
      os_type=self.op.os_type,
2919 396e1b78 Michael Hanselmann
      memory=self.op.mem_size,
2920 396e1b78 Michael Hanselmann
      vcpus=self.op.vcpus,
2921 c7b27e9e Iustin Pop
      nics=[(self.inst_ip, self.op.bridge, self.op.mac)],
2922 396e1b78 Michael Hanselmann
    ))
2923 a8083063 Iustin Pop
2924 880478f8 Iustin Pop
    nl = ([self.sstore.GetMasterNode(), self.op.pnode] +
2925 a8083063 Iustin Pop
          self.secondaries)
2926 a8083063 Iustin Pop
    return env, nl, nl
2927 a8083063 Iustin Pop
2928 a8083063 Iustin Pop
2929 a8083063 Iustin Pop
  def CheckPrereq(self):
2930 a8083063 Iustin Pop
    """Check prerequisites.
2931 a8083063 Iustin Pop

2932 a8083063 Iustin Pop
    """
2933 538475ca Iustin Pop
    # set optional parameters to none if they don't exist
2934 538475ca Iustin Pop
    for attr in ["kernel_path", "initrd_path", "hvm_boot_order", "pnode",
2935 31a853d2 Iustin Pop
                 "iallocator", "hvm_acpi", "hvm_pae", "hvm_cdrom_image_path",
2936 31a853d2 Iustin Pop
                 "vnc_bind_address"]:
2937 40ed12dd Guido Trotter
      if not hasattr(self.op, attr):
2938 40ed12dd Guido Trotter
        setattr(self.op, attr, None)
2939 40ed12dd Guido Trotter
2940 a8083063 Iustin Pop
    if self.op.mode not in (constants.INSTANCE_CREATE,
2941 a8083063 Iustin Pop
                            constants.INSTANCE_IMPORT):
2942 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("Invalid instance creation mode '%s'" %
2943 3ecf6786 Iustin Pop
                                 self.op.mode)
2944 a8083063 Iustin Pop
2945 eedc99de Manuel Franceschini
    if (not self.cfg.GetVGName() and
2946 eedc99de Manuel Franceschini
        self.op.disk_template not in constants.DTS_NOT_LVM):
2947 eedc99de Manuel Franceschini
      raise errors.OpPrereqError("Cluster does not support lvm-based"
2948 eedc99de Manuel Franceschini
                                 " instances")
2949 eedc99de Manuel Franceschini
2950 a8083063 Iustin Pop
    if self.op.mode == constants.INSTANCE_IMPORT:
2951 a8083063 Iustin Pop
      src_node = getattr(self.op, "src_node", None)
2952 a8083063 Iustin Pop
      src_path = getattr(self.op, "src_path", None)
2953 a8083063 Iustin Pop
      if src_node is None or src_path is None:
2954 3ecf6786 Iustin Pop
        raise errors.OpPrereqError("Importing an instance requires source"
2955 3ecf6786 Iustin Pop
                                   " node and path options")
2956 a8083063 Iustin Pop
      src_node_full = self.cfg.ExpandNodeName(src_node)
2957 a8083063 Iustin Pop
      if src_node_full is None:
2958 3ecf6786 Iustin Pop
        raise errors.OpPrereqError("Unknown source node '%s'" % src_node)
2959 a8083063 Iustin Pop
      self.op.src_node = src_node = src_node_full
2960 a8083063 Iustin Pop
2961 a8083063 Iustin Pop
      if not os.path.isabs(src_path):
2962 3ecf6786 Iustin Pop
        raise errors.OpPrereqError("The source path must be absolute")
2963 a8083063 Iustin Pop
2964 a8083063 Iustin Pop
      export_info = rpc.call_export_info(src_node, src_path)
2965 a8083063 Iustin Pop
2966 a8083063 Iustin Pop
      if not export_info:
2967 3ecf6786 Iustin Pop
        raise errors.OpPrereqError("No export found in dir %s" % src_path)
2968 a8083063 Iustin Pop
2969 a8083063 Iustin Pop
      if not export_info.has_section(constants.INISECT_EXP):
2970 3ecf6786 Iustin Pop
        raise errors.ProgrammerError("Corrupted export config")
2971 a8083063 Iustin Pop
2972 a8083063 Iustin Pop
      ei_version = export_info.get(constants.INISECT_EXP, 'version')
2973 a8083063 Iustin Pop
      if (int(ei_version) != constants.EXPORT_VERSION):
2974 3ecf6786 Iustin Pop
        raise errors.OpPrereqError("Wrong export version %s (wanted %d)" %
2975 3ecf6786 Iustin Pop
                                   (ei_version, constants.EXPORT_VERSION))
2976 a8083063 Iustin Pop
2977 a8083063 Iustin Pop
      if int(export_info.get(constants.INISECT_INS, 'disk_count')) > 1:
2978 3ecf6786 Iustin Pop
        raise errors.OpPrereqError("Can't import instance with more than"
2979 3ecf6786 Iustin Pop
                                   " one data disk")
2980 a8083063 Iustin Pop
2981 a8083063 Iustin Pop
      # FIXME: are the old os-es, disk sizes, etc. useful?
2982 a8083063 Iustin Pop
      self.op.os_type = export_info.get(constants.INISECT_EXP, 'os')
2983 a8083063 Iustin Pop
      diskimage = os.path.join(src_path, export_info.get(constants.INISECT_INS,
2984 a8083063 Iustin Pop
                                                         'disk0_dump'))
2985 a8083063 Iustin Pop
      self.src_image = diskimage
2986 a8083063 Iustin Pop
    else: # INSTANCE_CREATE
2987 a8083063 Iustin Pop
      if getattr(self.op, "os_type", None) is None:
2988 3ecf6786 Iustin Pop
        raise errors.OpPrereqError("No guest OS specified")
2989 a8083063 Iustin Pop
2990 901a65c1 Iustin Pop
    #### instance parameters check
2991 901a65c1 Iustin Pop
2992 a8083063 Iustin Pop
    # disk template and mirror node verification
2993 a8083063 Iustin Pop
    if self.op.disk_template not in constants.DISK_TEMPLATES:
2994 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("Invalid disk template name")
2995 a8083063 Iustin Pop
2996 901a65c1 Iustin Pop
    # instance name verification
2997 901a65c1 Iustin Pop
    hostname1 = utils.HostInfo(self.op.instance_name)
2998 901a65c1 Iustin Pop
2999 901a65c1 Iustin Pop
    self.op.instance_name = instance_name = hostname1.name
3000 901a65c1 Iustin Pop
    instance_list = self.cfg.GetInstanceList()
3001 901a65c1 Iustin Pop
    if instance_name in instance_list:
3002 901a65c1 Iustin Pop
      raise errors.OpPrereqError("Instance '%s' is already in the cluster" %
3003 901a65c1 Iustin Pop
                                 instance_name)
3004 901a65c1 Iustin Pop
3005 901a65c1 Iustin Pop
    # ip validity checks
3006 901a65c1 Iustin Pop
    ip = getattr(self.op, "ip", None)
3007 901a65c1 Iustin Pop
    if ip is None or ip.lower() == "none":
3008 901a65c1 Iustin Pop
      inst_ip = None
3009 901a65c1 Iustin Pop
    elif ip.lower() == "auto":
3010 901a65c1 Iustin Pop
      inst_ip = hostname1.ip
3011 901a65c1 Iustin Pop
    else:
3012 901a65c1 Iustin Pop
      if not utils.IsValidIP(ip):
3013 901a65c1 Iustin Pop
        raise errors.OpPrereqError("given IP address '%s' doesn't look"
3014 901a65c1 Iustin Pop
                                   " like a valid IP" % ip)
3015 901a65c1 Iustin Pop
      inst_ip = ip
3016 901a65c1 Iustin Pop
    self.inst_ip = self.op.ip = inst_ip
3017 901a65c1 Iustin Pop
3018 901a65c1 Iustin Pop
    if self.op.start and not self.op.ip_check:
3019 901a65c1 Iustin Pop
      raise errors.OpPrereqError("Cannot ignore IP address conflicts when"
3020 901a65c1 Iustin Pop
                                 " adding an instance in start mode")
3021 901a65c1 Iustin Pop
3022 901a65c1 Iustin Pop
    if self.op.ip_check:
3023 901a65c1 Iustin Pop
      if utils.TcpPing(hostname1.ip, constants.DEFAULT_NODED_PORT):
3024 901a65c1 Iustin Pop
        raise errors.OpPrereqError("IP %s of instance %s already in use" %
3025 901a65c1 Iustin Pop
                                   (hostname1.ip, instance_name))
3026 901a65c1 Iustin Pop
3027 901a65c1 Iustin Pop
    # MAC address verification
3028 901a65c1 Iustin Pop
    if self.op.mac != "auto":
3029 901a65c1 Iustin Pop
      if not utils.IsValidMac(self.op.mac.lower()):
3030 901a65c1 Iustin Pop
        raise errors.OpPrereqError("invalid MAC address specified: %s" %
3031 901a65c1 Iustin Pop
                                   self.op.mac)
3032 901a65c1 Iustin Pop
3033 901a65c1 Iustin Pop
    # bridge verification
3034 901a65c1 Iustin Pop
    bridge = getattr(self.op, "bridge", None)
3035 901a65c1 Iustin Pop
    if bridge is None:
3036 901a65c1 Iustin Pop
      self.op.bridge = self.cfg.GetDefBridge()
3037 901a65c1 Iustin Pop
    else:
3038 901a65c1 Iustin Pop
      self.op.bridge = bridge
3039 901a65c1 Iustin Pop
3040 901a65c1 Iustin Pop
    # boot order verification
3041 901a65c1 Iustin Pop
    if self.op.hvm_boot_order is not None:
3042 901a65c1 Iustin Pop
      if len(self.op.hvm_boot_order.strip("acdn")) != 0:
3043 901a65c1 Iustin Pop
        raise errors.OpPrereqError("invalid boot order specified,"
3044 901a65c1 Iustin Pop
                                   " must be one or more of [acdn]")
3045 901a65c1 Iustin Pop
    # file storage checks
3046 0f1a06e3 Manuel Franceschini
    if (self.op.file_driver and
3047 0f1a06e3 Manuel Franceschini
        not self.op.file_driver in constants.FILE_DRIVER):
3048 0f1a06e3 Manuel Franceschini
      raise errors.OpPrereqError("Invalid file driver name '%s'" %
3049 0f1a06e3 Manuel Franceschini
                                 self.op.file_driver)
3050 0f1a06e3 Manuel Franceschini
3051 0f1a06e3 Manuel Franceschini
    if self.op.file_storage_dir and os.path.isabs(self.op.file_storage_dir):
3052 b4de68a9 Iustin Pop
      raise errors.OpPrereqError("File storage directory not a relative"
3053 b4de68a9 Iustin Pop
                                 " path")
3054 538475ca Iustin Pop
    #### allocator run
3055 538475ca Iustin Pop
3056 538475ca Iustin Pop
    if [self.op.iallocator, self.op.pnode].count(None) != 1:
3057 538475ca Iustin Pop
      raise errors.OpPrereqError("One and only one of iallocator and primary"
3058 538475ca Iustin Pop
                                 " node must be given")
3059 538475ca Iustin Pop
3060 538475ca Iustin Pop
    if self.op.iallocator is not None:
3061 538475ca Iustin Pop
      self._RunAllocator()
3062 0f1a06e3 Manuel Franceschini
3063 901a65c1 Iustin Pop
    #### node related checks
3064 901a65c1 Iustin Pop
3065 901a65c1 Iustin Pop
    # check primary node
3066 901a65c1 Iustin Pop
    pnode = self.cfg.GetNodeInfo(self.cfg.ExpandNodeName(self.op.pnode))
3067 901a65c1 Iustin Pop
    if pnode is None:
3068 901a65c1 Iustin Pop
      raise errors.OpPrereqError("Primary node '%s' is unknown" %
3069 901a65c1 Iustin Pop
                                 self.op.pnode)
3070 901a65c1 Iustin Pop
    self.op.pnode = pnode.name
3071 901a65c1 Iustin Pop
    self.pnode = pnode
3072 901a65c1 Iustin Pop
    self.secondaries = []
3073 901a65c1 Iustin Pop
3074 901a65c1 Iustin Pop
    # mirror node verification
3075 a1f445d3 Iustin Pop
    if self.op.disk_template in constants.DTS_NET_MIRROR:
3076 a8083063 Iustin Pop
      if getattr(self.op, "snode", None) is None:
3077 a1f445d3 Iustin Pop
        raise errors.OpPrereqError("The networked disk templates need"
3078 3ecf6786 Iustin Pop
                                   " a mirror node")
3079 a8083063 Iustin Pop
3080 a8083063 Iustin Pop
      snode_name = self.cfg.ExpandNodeName(self.op.snode)
3081 a8083063 Iustin Pop
      if snode_name is None:
3082 3ecf6786 Iustin Pop
        raise errors.OpPrereqError("Unknown secondary node '%s'" %
3083 3ecf6786 Iustin Pop
                                   self.op.snode)
3084 a8083063 Iustin Pop
      elif snode_name == pnode.name:
3085 3ecf6786 Iustin Pop
        raise errors.OpPrereqError("The secondary node cannot be"
3086 3ecf6786 Iustin Pop
                                   " the primary node.")
3087 a8083063 Iustin Pop
      self.secondaries.append(snode_name)
3088 a8083063 Iustin Pop
3089 e2fe6369 Iustin Pop
    req_size = _ComputeDiskSize(self.op.disk_template,
3090 e2fe6369 Iustin Pop
                                self.op.disk_size, self.op.swap_size)
3091 ed1ebc60 Guido Trotter
3092 8d75db10 Iustin Pop
    # Check lv size requirements
3093 8d75db10 Iustin Pop
    if req_size is not None:
3094 8d75db10 Iustin Pop
      nodenames = [pnode.name] + self.secondaries
3095 8d75db10 Iustin Pop
      nodeinfo = rpc.call_node_info(nodenames, self.cfg.GetVGName())
3096 8d75db10 Iustin Pop
      for node in nodenames:
3097 8d75db10 Iustin Pop
        info = nodeinfo.get(node, None)
3098 8d75db10 Iustin Pop
        if not info:
3099 8d75db10 Iustin Pop
          raise errors.OpPrereqError("Cannot get current information"
3100 3e91897b Iustin Pop
                                     " from node '%s'" % node)
3101 8d75db10 Iustin Pop
        vg_free = info.get('vg_free', None)
3102 8d75db10 Iustin Pop
        if not isinstance(vg_free, int):
3103 8d75db10 Iustin Pop
          raise errors.OpPrereqError("Can't compute free disk space on"
3104 8d75db10 Iustin Pop
                                     " node %s" % node)
3105 8d75db10 Iustin Pop
        if req_size > info['vg_free']:
3106 8d75db10 Iustin Pop
          raise errors.OpPrereqError("Not enough disk space on target node %s."
3107 8d75db10 Iustin Pop
                                     " %d MB available, %d MB required" %
3108 8d75db10 Iustin Pop
                                     (node, info['vg_free'], req_size))
3109 ed1ebc60 Guido Trotter
3110 a8083063 Iustin Pop
    # os verification
3111 00fe9e38 Guido Trotter
    os_obj = rpc.call_os_get(pnode.name, self.op.os_type)
3112 dfa96ded Guido Trotter
    if not os_obj:
3113 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("OS '%s' not in supported os list for"
3114 3ecf6786 Iustin Pop
                                 " primary node"  % self.op.os_type)
3115 a8083063 Iustin Pop
3116 3b6d8c9b Iustin Pop
    if self.op.kernel_path == constants.VALUE_NONE:
3117 3b6d8c9b Iustin Pop
      raise errors.OpPrereqError("Can't set instance kernel to none")
3118 3b6d8c9b Iustin Pop
3119 a8083063 Iustin Pop
3120 901a65c1 Iustin Pop
    # bridge check on primary node
3121 a8083063 Iustin Pop
    if not rpc.call_bridges_exist(self.pnode.name, [self.op.bridge]):
3122 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("target bridge '%s' does not exist on"
3123 3ecf6786 Iustin Pop
                                 " destination node '%s'" %
3124 3ecf6786 Iustin Pop
                                 (self.op.bridge, pnode.name))
3125 a8083063 Iustin Pop
3126 49ce1563 Iustin Pop
    # memory check on primary node
3127 49ce1563 Iustin Pop
    if self.op.start:
3128 49ce1563 Iustin Pop
      _CheckNodeFreeMemory(self.cfg, self.pnode.name,
3129 49ce1563 Iustin Pop
                           "creating instance %s" % self.op.instance_name,
3130 49ce1563 Iustin Pop
                           self.op.mem_size)
3131 49ce1563 Iustin Pop
3132 31a853d2 Iustin Pop
    # hvm_cdrom_image_path verification
3133 31a853d2 Iustin Pop
    if self.op.hvm_cdrom_image_path is not None:
3134 31a853d2 Iustin Pop
      if not os.path.isabs(self.op.hvm_cdrom_image_path):
3135 31a853d2 Iustin Pop
        raise errors.OpPrereqError("The path to the HVM CDROM image must"
3136 31a853d2 Iustin Pop
                                   " be an absolute path or None, not %s" %
3137 31a853d2 Iustin Pop
                                   self.op.hvm_cdrom_image_path)
3138 31a853d2 Iustin Pop
      if not os.path.isfile(self.op.hvm_cdrom_image_path):
3139 31a853d2 Iustin Pop
        raise errors.OpPrereqError("The HVM CDROM image must either be a"
3140 31a853d2 Iustin Pop
                                   " regular file or a symlink pointing to"
3141 31a853d2 Iustin Pop
                                   " an existing regular file, not %s" %
3142 31a853d2 Iustin Pop
                                   self.op.hvm_cdrom_image_path)
3143 31a853d2 Iustin Pop
3144 31a853d2 Iustin Pop
    # vnc_bind_address verification
3145 31a853d2 Iustin Pop
    if self.op.vnc_bind_address is not None:
3146 31a853d2 Iustin Pop
      if not utils.IsValidIP(self.op.vnc_bind_address):
3147 31a853d2 Iustin Pop
        raise errors.OpPrereqError("given VNC bind address '%s' doesn't look"
3148 31a853d2 Iustin Pop
                                   " like a valid IP address" %
3149 31a853d2 Iustin Pop
                                   self.op.vnc_bind_address)
3150 31a853d2 Iustin Pop
3151 a8083063 Iustin Pop
    if self.op.start:
3152 a8083063 Iustin Pop
      self.instance_status = 'up'
3153 a8083063 Iustin Pop
    else:
3154 a8083063 Iustin Pop
      self.instance_status = 'down'
3155 a8083063 Iustin Pop
3156 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
3157 a8083063 Iustin Pop
    """Create and add the instance to the cluster.
3158 a8083063 Iustin Pop

3159 a8083063 Iustin Pop
    """
3160 a8083063 Iustin Pop
    instance = self.op.instance_name
3161 a8083063 Iustin Pop
    pnode_name = self.pnode.name
3162 a8083063 Iustin Pop
3163 1862d460 Alexander Schreiber
    if self.op.mac == "auto":
3164 ba4b62cf Iustin Pop
      mac_address = self.cfg.GenerateMAC()
3165 1862d460 Alexander Schreiber
    else:
3166 ba4b62cf Iustin Pop
      mac_address = self.op.mac
3167 1862d460 Alexander Schreiber
3168 1862d460 Alexander Schreiber
    nic = objects.NIC(bridge=self.op.bridge, mac=mac_address)
3169 a8083063 Iustin Pop
    if self.inst_ip is not None:
3170 a8083063 Iustin Pop
      nic.ip = self.inst_ip
3171 a8083063 Iustin Pop
3172 2a6469d5 Alexander Schreiber
    ht_kind = self.sstore.GetHypervisorType()
3173 2a6469d5 Alexander Schreiber
    if ht_kind in constants.HTS_REQ_PORT:
3174 2a6469d5 Alexander Schreiber
      network_port = self.cfg.AllocatePort()
3175 2a6469d5 Alexander Schreiber
    else:
3176 2a6469d5 Alexander Schreiber
      network_port = None
3177 58acb49d Alexander Schreiber
3178 31a853d2 Iustin Pop
    if self.op.vnc_bind_address is None:
3179 31a853d2 Iustin Pop
      self.op.vnc_bind_address = constants.VNC_DEFAULT_BIND_ADDRESS
3180 31a853d2 Iustin Pop
3181 2c313123 Manuel Franceschini
    # this is needed because os.path.join does not accept None arguments
3182 2c313123 Manuel Franceschini
    if self.op.file_storage_dir is None:
3183 2c313123 Manuel Franceschini
      string_file_storage_dir = ""
3184 2c313123 Manuel Franceschini
    else:
3185 2c313123 Manuel Franceschini
      string_file_storage_dir = self.op.file_storage_dir
3186 2c313123 Manuel Franceschini
3187 0f1a06e3 Manuel Franceschini
    # build the full file storage dir path
3188 0f1a06e3 Manuel Franceschini
    file_storage_dir = os.path.normpath(os.path.join(
3189 0f1a06e3 Manuel Franceschini
                                        self.sstore.GetFileStorageDir(),
3190 2c313123 Manuel Franceschini
                                        string_file_storage_dir, instance))
3191 0f1a06e3 Manuel Franceschini
3192 0f1a06e3 Manuel Franceschini
3193 923b1523 Iustin Pop
    disks = _GenerateDiskTemplate(self.cfg,
3194 a8083063 Iustin Pop
                                  self.op.disk_template,
3195 a8083063 Iustin Pop
                                  instance, pnode_name,
3196 a8083063 Iustin Pop
                                  self.secondaries, self.op.disk_size,
3197 0f1a06e3 Manuel Franceschini
                                  self.op.swap_size,
3198 0f1a06e3 Manuel Franceschini
                                  file_storage_dir,
3199 0f1a06e3 Manuel Franceschini
                                  self.op.file_driver)
3200 a8083063 Iustin Pop
3201 a8083063 Iustin Pop
    iobj = objects.Instance(name=instance, os=self.op.os_type,
3202 a8083063 Iustin Pop
                            primary_node=pnode_name,
3203 a8083063 Iustin Pop
                            memory=self.op.mem_size,
3204 a8083063 Iustin Pop
                            vcpus=self.op.vcpus,
3205 a8083063 Iustin Pop
                            nics=[nic], disks=disks,
3206 a8083063 Iustin Pop
                            disk_template=self.op.disk_template,
3207 a8083063 Iustin Pop
                            status=self.instance_status,
3208 58acb49d Alexander Schreiber
                            network_port=network_port,
3209 3b6d8c9b Iustin Pop
                            kernel_path=self.op.kernel_path,
3210 3b6d8c9b Iustin Pop
                            initrd_path=self.op.initrd_path,
3211 25c5878d Alexander Schreiber
                            hvm_boot_order=self.op.hvm_boot_order,
3212 31a853d2 Iustin Pop
                            hvm_acpi=self.op.hvm_acpi,
3213 31a853d2 Iustin Pop
                            hvm_pae=self.op.hvm_pae,
3214 31a853d2 Iustin Pop
                            hvm_cdrom_image_path=self.op.hvm_cdrom_image_path,
3215 31a853d2 Iustin Pop
                            vnc_bind_address=self.op.vnc_bind_address,
3216 a8083063 Iustin Pop
                            )
3217 a8083063 Iustin Pop
3218 a8083063 Iustin Pop
    feedback_fn("* creating instance disks...")
3219 a8083063 Iustin Pop
    if not _CreateDisks(self.cfg, iobj):
3220 a8083063 Iustin Pop
      _RemoveDisks(iobj, self.cfg)
3221 3ecf6786 Iustin Pop
      raise errors.OpExecError("Device creation failed, reverting...")
3222 a8083063 Iustin Pop
3223 a8083063 Iustin Pop
    feedback_fn("adding instance %s to cluster config" % instance)
3224 a8083063 Iustin Pop
3225 a8083063 Iustin Pop
    self.cfg.AddInstance(iobj)
3226 a8083063 Iustin Pop
3227 a8083063 Iustin Pop
    if self.op.wait_for_sync:
3228 5bfac263 Iustin Pop
      disk_abort = not _WaitForSync(self.cfg, iobj, self.proc)
3229 a1f445d3 Iustin Pop
    elif iobj.disk_template in constants.DTS_NET_MIRROR:
3230 a8083063 Iustin Pop
      # make sure the disks are not degraded (still sync-ing is ok)
3231 a8083063 Iustin Pop
      time.sleep(15)
3232 a8083063 Iustin Pop
      feedback_fn("* checking mirrors status")
3233 5bfac263 Iustin Pop
      disk_abort = not _WaitForSync(self.cfg, iobj, self.proc, oneshot=True)
3234 a8083063 Iustin Pop
    else:
3235 a8083063 Iustin Pop
      disk_abort = False
3236 a8083063 Iustin Pop
3237 a8083063 Iustin Pop
    if disk_abort:
3238 a8083063 Iustin Pop
      _RemoveDisks(iobj, self.cfg)
3239 a8083063 Iustin Pop
      self.cfg.RemoveInstance(iobj.name)
3240 3ecf6786 Iustin Pop
      raise errors.OpExecError("There are some degraded disks for"
3241 3ecf6786 Iustin Pop
                               " this instance")
3242 a8083063 Iustin Pop
3243 a8083063 Iustin Pop
    feedback_fn("creating os for instance %s on node %s" %
3244 a8083063 Iustin Pop
                (instance, pnode_name))
3245 a8083063 Iustin Pop
3246 a8083063 Iustin Pop
    if iobj.disk_template != constants.DT_DISKLESS:
3247 a8083063 Iustin Pop
      if self.op.mode == constants.INSTANCE_CREATE:
3248 a8083063 Iustin Pop
        feedback_fn("* running the instance OS create scripts...")
3249 a8083063 Iustin Pop
        if not rpc.call_instance_os_add(pnode_name, iobj, "sda", "sdb"):
3250 3ecf6786 Iustin Pop
          raise errors.OpExecError("could not add os for instance %s"
3251 3ecf6786 Iustin Pop
                                   " on node %s" %
3252 3ecf6786 Iustin Pop
                                   (instance, pnode_name))
3253 a8083063 Iustin Pop
3254 a8083063 Iustin Pop
      elif self.op.mode == constants.INSTANCE_IMPORT:
3255 a8083063 Iustin Pop
        feedback_fn("* running the instance OS import scripts...")
3256 a8083063 Iustin Pop
        src_node = self.op.src_node
3257 a8083063 Iustin Pop
        src_image = self.src_image
3258 a8083063 Iustin Pop
        if not rpc.call_instance_os_import(pnode_name, iobj, "sda", "sdb",
3259 a8083063 Iustin Pop
                                                src_node, src_image):
3260 3ecf6786 Iustin Pop
          raise errors.OpExecError("Could not import os for instance"
3261 3ecf6786 Iustin Pop
                                   " %s on node %s" %
3262 3ecf6786 Iustin Pop
                                   (instance, pnode_name))
3263 a8083063 Iustin Pop
      else:
3264 a8083063 Iustin Pop
        # also checked in the prereq part
3265 3ecf6786 Iustin Pop
        raise errors.ProgrammerError("Unknown OS initialization mode '%s'"
3266 3ecf6786 Iustin Pop
                                     % self.op.mode)
3267 a8083063 Iustin Pop
3268 a8083063 Iustin Pop
    if self.op.start:
3269 a8083063 Iustin Pop
      logger.Info("starting instance %s on node %s" % (instance, pnode_name))
3270 a8083063 Iustin Pop
      feedback_fn("* starting instance...")
3271 a8083063 Iustin Pop
      if not rpc.call_instance_start(pnode_name, iobj, None):
3272 3ecf6786 Iustin Pop
        raise errors.OpExecError("Could not start instance")
3273 a8083063 Iustin Pop
3274 a8083063 Iustin Pop
3275 a8083063 Iustin Pop
class LUConnectConsole(NoHooksLU):
3276 a8083063 Iustin Pop
  """Connect to an instance's console.
3277 a8083063 Iustin Pop

3278 a8083063 Iustin Pop
  This is somewhat special in that it returns the command line that
3279 a8083063 Iustin Pop
  you need to run on the master node in order to connect to the
3280 a8083063 Iustin Pop
  console.
3281 a8083063 Iustin Pop

3282 a8083063 Iustin Pop
  """
3283 a8083063 Iustin Pop
  _OP_REQP = ["instance_name"]
3284 a8083063 Iustin Pop
3285 a8083063 Iustin Pop
  def CheckPrereq(self):
3286 a8083063 Iustin Pop
    """Check prerequisites.
3287 a8083063 Iustin Pop

3288 a8083063 Iustin Pop
    This checks that the instance is in the cluster.
3289 a8083063 Iustin Pop

3290 a8083063 Iustin Pop
    """
3291 a8083063 Iustin Pop
    instance = self.cfg.GetInstanceInfo(
3292 a8083063 Iustin Pop
      self.cfg.ExpandInstanceName(self.op.instance_name))
3293 a8083063 Iustin Pop
    if instance is None:
3294 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("Instance '%s' not known" %
3295 3ecf6786 Iustin Pop
                                 self.op.instance_name)
3296 a8083063 Iustin Pop
    self.instance = instance
3297 a8083063 Iustin Pop
3298 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
3299 a8083063 Iustin Pop
    """Connect to the console of an instance
3300 a8083063 Iustin Pop

3301 a8083063 Iustin Pop
    """
3302 a8083063 Iustin Pop
    instance = self.instance
3303 a8083063 Iustin Pop
    node = instance.primary_node
3304 a8083063 Iustin Pop
3305 a8083063 Iustin Pop
    node_insts = rpc.call_instance_list([node])[node]
3306 a8083063 Iustin Pop
    if node_insts is False:
3307 3ecf6786 Iustin Pop
      raise errors.OpExecError("Can't connect to node %s." % node)
3308 a8083063 Iustin Pop
3309 a8083063 Iustin Pop
    if instance.name not in node_insts:
3310 3ecf6786 Iustin Pop
      raise errors.OpExecError("Instance %s is not running." % instance.name)
3311 a8083063 Iustin Pop
3312 a8083063 Iustin Pop
    logger.Debug("connecting to console of %s on %s" % (instance.name, node))
3313 a8083063 Iustin Pop
3314 a8083063 Iustin Pop
    hyper = hypervisor.GetHypervisor()
3315 30989e69 Alexander Schreiber
    console_cmd = hyper.GetShellCommandForConsole(instance)
3316 b047857b Michael Hanselmann
3317 82122173 Iustin Pop
    # build ssh cmdline
3318 0a80a26f Michael Hanselmann
    return self.ssh.BuildCmd(node, "root", console_cmd, batch=True, tty=True)
3319 a8083063 Iustin Pop
3320 a8083063 Iustin Pop
3321 a8083063 Iustin Pop
class LUReplaceDisks(LogicalUnit):
3322 a8083063 Iustin Pop
  """Replace the disks of an instance.
3323 a8083063 Iustin Pop

3324 a8083063 Iustin Pop
  """
3325 a8083063 Iustin Pop
  HPATH = "mirrors-replace"
3326 a8083063 Iustin Pop
  HTYPE = constants.HTYPE_INSTANCE
3327 a9e0c397 Iustin Pop
  _OP_REQP = ["instance_name", "mode", "disks"]
3328 a8083063 Iustin Pop
3329 b6e82a65 Iustin Pop
  def _RunAllocator(self):
3330 b6e82a65 Iustin Pop
    """Compute a new secondary node using an IAllocator.
3331 b6e82a65 Iustin Pop

3332 b6e82a65 Iustin Pop
    """
3333 b6e82a65 Iustin Pop
    ial = IAllocator(self.cfg, self.sstore,
3334 b6e82a65 Iustin Pop
                     mode=constants.IALLOCATOR_MODE_RELOC,
3335 b6e82a65 Iustin Pop
                     name=self.op.instance_name,
3336 b6e82a65 Iustin Pop
                     relocate_from=[self.sec_node])
3337 b6e82a65 Iustin Pop
3338 b6e82a65 Iustin Pop
    ial.Run(self.op.iallocator)
3339 b6e82a65 Iustin Pop
3340 b6e82a65 Iustin Pop
    if not ial.success:
3341 b6e82a65 Iustin Pop
      raise errors.OpPrereqError("Can't compute nodes using"
3342 b6e82a65 Iustin Pop
                                 " iallocator '%s': %s" % (self.op.iallocator,
3343 b6e82a65 Iustin Pop
                                                           ial.info))
3344 b6e82a65 Iustin Pop
    if len(ial.nodes) != ial.required_nodes:
3345 b6e82a65 Iustin Pop
      raise errors.OpPrereqError("iallocator '%s' returned invalid number"
3346 b6e82a65 Iustin Pop
                                 " of nodes (%s), required %s" %
3347 b6e82a65 Iustin Pop
                                 (len(ial.nodes), ial.required_nodes))
3348 b6e82a65 Iustin Pop
    self.op.remote_node = ial.nodes[0]
3349 b6e82a65 Iustin Pop
    logger.ToStdout("Selected new secondary for the instance: %s" %
3350 b6e82a65 Iustin Pop
                    self.op.remote_node)
3351 b6e82a65 Iustin Pop
3352 a8083063 Iustin Pop
  def BuildHooksEnv(self):
3353 a8083063 Iustin Pop
    """Build hooks env.
3354 a8083063 Iustin Pop

3355 a8083063 Iustin Pop
    This runs on the master, the primary and all the secondaries.
3356 a8083063 Iustin Pop

3357 a8083063 Iustin Pop
    """
3358 a8083063 Iustin Pop
    env = {
3359 a9e0c397 Iustin Pop
      "MODE": self.op.mode,
3360 a8083063 Iustin Pop
      "NEW_SECONDARY": self.op.remote_node,
3361 a8083063 Iustin Pop
      "OLD_SECONDARY": self.instance.secondary_nodes[0],
3362 a8083063 Iustin Pop
      }
3363 396e1b78 Michael Hanselmann
    env.update(_BuildInstanceHookEnvByObject(self.instance))
3364 0834c866 Iustin Pop
    nl = [
3365 0834c866 Iustin Pop
      self.sstore.GetMasterNode(),
3366 0834c866 Iustin Pop
      self.instance.primary_node,
3367 0834c866 Iustin Pop
      ]
3368 0834c866 Iustin Pop
    if self.op.remote_node is not None:
3369 0834c866 Iustin Pop
      nl.append(self.op.remote_node)
3370 a8083063 Iustin Pop
    return env, nl, nl
3371 a8083063 Iustin Pop
3372 a8083063 Iustin Pop
  def CheckPrereq(self):
3373 a8083063 Iustin Pop
    """Check prerequisites.
3374 a8083063 Iustin Pop

3375 a8083063 Iustin Pop
    This checks that the instance is in the cluster.
3376 a8083063 Iustin Pop

3377 a8083063 Iustin Pop
    """
3378 b6e82a65 Iustin Pop
    if not hasattr(self.op, "remote_node"):
3379 b6e82a65 Iustin Pop
      self.op.remote_node = None
3380 b6e82a65 Iustin Pop
3381 a8083063 Iustin Pop
    instance = self.cfg.GetInstanceInfo(
3382 a8083063 Iustin Pop
      self.cfg.ExpandInstanceName(self.op.instance_name))
3383 a8083063 Iustin Pop
    if instance is None:
3384 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("Instance '%s' not known" %
3385 3ecf6786 Iustin Pop
                                 self.op.instance_name)
3386 a8083063 Iustin Pop
    self.instance = instance
3387 7df43a76 Iustin Pop
    self.op.instance_name = instance.name
3388 a8083063 Iustin Pop
3389 a9e0c397 Iustin Pop
    if instance.disk_template not in constants.DTS_NET_MIRROR:
3390 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("Instance's disk layout is not"
3391 a9e0c397 Iustin Pop
                                 " network mirrored.")
3392 a8083063 Iustin Pop
3393 a8083063 Iustin Pop
    if len(instance.secondary_nodes) != 1:
3394 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("The instance has a strange layout,"
3395 3ecf6786 Iustin Pop
                                 " expected one secondary but found %d" %
3396 3ecf6786 Iustin Pop
                                 len(instance.secondary_nodes))
3397 a8083063 Iustin Pop
3398 a9e0c397 Iustin Pop
    self.sec_node = instance.secondary_nodes[0]
3399 a9e0c397 Iustin Pop
3400 b6e82a65 Iustin Pop
    ia_name = getattr(self.op, "iallocator", None)
3401 b6e82a65 Iustin Pop
    if ia_name is not None:
3402 b6e82a65 Iustin Pop
      if self.op.remote_node is not None:
3403 b6e82a65 Iustin Pop
        raise errors.OpPrereqError("Give either the iallocator or the new"
3404 b6e82a65 Iustin Pop
                                   " secondary, not both")
3405 b6e82a65 Iustin Pop
      self.op.remote_node = self._RunAllocator()
3406 b6e82a65 Iustin Pop
3407 b6e82a65 Iustin Pop
    remote_node = self.op.remote_node
3408 a9e0c397 Iustin Pop
    if remote_node is not None:
3409 a8083063 Iustin Pop
      remote_node = self.cfg.ExpandNodeName(remote_node)
3410 a8083063 Iustin Pop
      if remote_node is None:
3411 3ecf6786 Iustin Pop
        raise errors.OpPrereqError("Node '%s' not known" %
3412 3ecf6786 Iustin Pop
                                   self.op.remote_node)
3413 a9e0c397 Iustin Pop
      self.remote_node_info = self.cfg.GetNodeInfo(remote_node)
3414 a9e0c397 Iustin Pop
    else:
3415 a9e0c397 Iustin Pop
      self.remote_node_info = None
3416 a8083063 Iustin Pop
    if remote_node == instance.primary_node:
3417 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("The specified node is the primary node of"
3418 3ecf6786 Iustin Pop
                                 " the instance.")
3419 a9e0c397 Iustin Pop
    elif remote_node == self.sec_node:
3420 0834c866 Iustin Pop
      if self.op.mode == constants.REPLACE_DISK_SEC:
3421 0834c866 Iustin Pop
        # this is for DRBD8, where we can't execute the same mode of
3422 0834c866 Iustin Pop
        # replacement as for drbd7 (no different port allocated)
3423 0834c866 Iustin Pop
        raise errors.OpPrereqError("Same secondary given, cannot execute"
3424 0834c866 Iustin Pop
                                   " replacement")
3425 a9e0c397 Iustin Pop
    if instance.disk_template == constants.DT_DRBD8:
3426 7df43a76 Iustin Pop
      if (self.op.mode == constants.REPLACE_DISK_ALL and
3427 7df43a76 Iustin Pop
          remote_node is not None):
3428 7df43a76 Iustin Pop
        # switch to replace secondary mode
3429 7df43a76 Iustin Pop
        self.op.mode = constants.REPLACE_DISK_SEC
3430 7df43a76 Iustin Pop
3431 a9e0c397 Iustin Pop
      if self.op.mode == constants.REPLACE_DISK_ALL:
3432 12c3449a Michael Hanselmann
        raise errors.OpPrereqError("Template 'drbd' only allows primary or"
3433 a9e0c397 Iustin Pop
                                   " secondary disk replacement, not"
3434 a9e0c397 Iustin Pop
                                   " both at once")
3435 a9e0c397 Iustin Pop
      elif self.op.mode == constants.REPLACE_DISK_PRI:
3436 a9e0c397 Iustin Pop
        if remote_node is not None:
3437 12c3449a Michael Hanselmann
          raise errors.OpPrereqError("Template 'drbd' does not allow changing"
3438 a9e0c397 Iustin Pop
                                     " the secondary while doing a primary"
3439 a9e0c397 Iustin Pop
                                     " node disk replacement")
3440 a9e0c397 Iustin Pop
        self.tgt_node = instance.primary_node
3441 cff90b79 Iustin Pop
        self.oth_node = instance.secondary_nodes[0]
3442 a9e0c397 Iustin Pop
      elif self.op.mode == constants.REPLACE_DISK_SEC:
3443 a9e0c397 Iustin Pop
        self.new_node = remote_node # this can be None, in which case
3444 a9e0c397 Iustin Pop
                                    # we don't change the secondary
3445 a9e0c397 Iustin Pop
        self.tgt_node = instance.secondary_nodes[0]
3446 cff90b79 Iustin Pop
        self.oth_node = instance.primary_node
3447 a9e0c397 Iustin Pop
      else:
3448 a9e0c397 Iustin Pop
        raise errors.ProgrammerError("Unhandled disk replace mode")
3449 a9e0c397 Iustin Pop
3450 a9e0c397 Iustin Pop
    for name in self.op.disks:
3451 a9e0c397 Iustin Pop
      if instance.FindDisk(name) is None:
3452 a9e0c397 Iustin Pop
        raise errors.OpPrereqError("Disk '%s' not found for instance '%s'" %
3453 a9e0c397 Iustin Pop
                                   (name, instance.name))
3454 a8083063 Iustin Pop
    self.op.remote_node = remote_node
3455 a8083063 Iustin Pop
3456 a9e0c397 Iustin Pop
  def _ExecD8DiskOnly(self, feedback_fn):
3457 a9e0c397 Iustin Pop
    """Replace a disk on the primary or secondary for dbrd8.
3458 a9e0c397 Iustin Pop

3459 a9e0c397 Iustin Pop
    The algorithm for replace is quite complicated:
3460 a9e0c397 Iustin Pop
      - for each disk to be replaced:
3461 a9e0c397 Iustin Pop
        - create new LVs on the target node with unique names
3462 a9e0c397 Iustin Pop
        - detach old LVs from the drbd device
3463 a9e0c397 Iustin Pop
        - rename old LVs to name_replaced.<time_t>
3464 a9e0c397 Iustin Pop
        - rename new LVs to old LVs
3465 a9e0c397 Iustin Pop
        - attach the new LVs (with the old names now) to the drbd device
3466 a9e0c397 Iustin Pop
      - wait for sync across all devices
3467 a9e0c397 Iustin Pop
      - for each modified disk:
3468 a9e0c397 Iustin Pop
        - remove old LVs (which have the name name_replaces.<time_t>)
3469 a9e0c397 Iustin Pop

3470 a9e0c397 Iustin Pop
    Failures are not very well handled.
3471 cff90b79 Iustin Pop

3472 a9e0c397 Iustin Pop
    """
3473 cff90b79 Iustin Pop
    steps_total = 6
3474 5bfac263 Iustin Pop
    warning, info = (self.proc.LogWarning, self.proc.LogInfo)
3475 a9e0c397 Iustin Pop
    instance = self.instance
3476 a9e0c397 Iustin Pop
    iv_names = {}
3477 a9e0c397 Iustin Pop
    vgname = self.cfg.GetVGName()
3478 a9e0c397 Iustin Pop
    # start of work
3479 a9e0c397 Iustin Pop
    cfg = self.cfg
3480 a9e0c397 Iustin Pop
    tgt_node = self.tgt_node
3481 cff90b79 Iustin Pop
    oth_node = self.oth_node
3482 cff90b79 Iustin Pop
3483 cff90b79 Iustin Pop
    # Step: check device activation
3484 5bfac263 Iustin Pop
    self.proc.LogStep(1, steps_total, "check device existence")
3485 cff90b79 Iustin Pop
    info("checking volume groups")
3486 cff90b79 Iustin Pop
    my_vg = cfg.GetVGName()
3487 cff90b79 Iustin Pop
    results = rpc.call_vg_list([oth_node, tgt_node])
3488 cff90b79 Iustin Pop
    if not results:
3489 cff90b79 Iustin Pop
      raise errors.OpExecError("Can't list volume groups on the nodes")
3490 cff90b79 Iustin Pop
    for node in oth_node, tgt_node:
3491 cff90b79 Iustin Pop
      res = results.get(node, False)
3492 cff90b79 Iustin Pop
      if not res or my_vg not in res:
3493 cff90b79 Iustin Pop
        raise errors.OpExecError("Volume group '%s' not found on %s" %
3494 cff90b79 Iustin Pop
                                 (my_vg, node))
3495 cff90b79 Iustin Pop
    for dev in instance.disks:
3496 cff90b79 Iustin Pop
      if not dev.iv_name in self.op.disks:
3497 cff90b79 Iustin Pop
        continue
3498 cff90b79 Iustin Pop
      for node in tgt_node, oth_node:
3499 cff90b79 Iustin Pop
        info("checking %s on %s" % (dev.iv_name, node))
3500 cff90b79 Iustin Pop
        cfg.SetDiskID(dev, node)
3501 cff90b79 Iustin Pop
        if not rpc.call_blockdev_find(node, dev):
3502 cff90b79 Iustin Pop
          raise errors.OpExecError("Can't find device %s on node %s" %
3503 cff90b79 Iustin Pop
                                   (dev.iv_name, node))
3504 cff90b79 Iustin Pop
3505 cff90b79 Iustin Pop
    # Step: check other node consistency
3506 5bfac263 Iustin Pop
    self.proc.LogStep(2, steps_total, "check peer consistency")
3507 cff90b79 Iustin Pop
    for dev in instance.disks:
3508 cff90b79 Iustin Pop
      if not dev.iv_name in self.op.disks:
3509 cff90b79 Iustin Pop
        continue
3510 cff90b79 Iustin Pop
      info("checking %s consistency on %s" % (dev.iv_name, oth_node))
3511 cff90b79 Iustin Pop
      if not _CheckDiskConsistency(self.cfg, dev, oth_node,
3512 cff90b79 Iustin Pop
                                   oth_node==instance.primary_node):
3513 cff90b79 Iustin Pop
        raise errors.OpExecError("Peer node (%s) has degraded storage, unsafe"
3514 cff90b79 Iustin Pop
                                 " to replace disks on this node (%s)" %
3515 cff90b79 Iustin Pop
                                 (oth_node, tgt_node))
3516 cff90b79 Iustin Pop
3517 cff90b79 Iustin Pop
    # Step: create new storage
3518 5bfac263 Iustin Pop
    self.proc.LogStep(3, steps_total, "allocate new storage")
3519 a9e0c397 Iustin Pop
    for dev in instance.disks:
3520 a9e0c397 Iustin Pop
      if not dev.iv_name in self.op.disks:
3521 a9e0c397 Iustin Pop
        continue
3522 a9e0c397 Iustin Pop
      size = dev.size
3523 a9e0c397 Iustin Pop
      cfg.SetDiskID(dev, tgt_node)
3524 a9e0c397 Iustin Pop
      lv_names = [".%s_%s" % (dev.iv_name, suf) for suf in ["data", "meta"]]
3525 a9e0c397 Iustin Pop
      names = _GenerateUniqueNames(cfg, lv_names)
3526 a9e0c397 Iustin Pop
      lv_data = objects.Disk(dev_type=constants.LD_LV, size=size,
3527 a9e0c397 Iustin Pop
                             logical_id=(vgname, names[0]))
3528 a9e0c397 Iustin Pop
      lv_meta = objects.Disk(dev_type=constants.LD_LV, size=128,
3529 a9e0c397 Iustin Pop
                             logical_id=(vgname, names[1]))
3530 a9e0c397 Iustin Pop
      new_lvs = [lv_data, lv_meta]
3531 a9e0c397 Iustin Pop
      old_lvs = dev.children
3532 a9e0c397 Iustin Pop
      iv_names[dev.iv_name] = (dev, old_lvs, new_lvs)
3533 cff90b79 Iustin Pop
      info("creating new local storage on %s for %s" %
3534 cff90b79 Iustin Pop
           (tgt_node, dev.iv_name))
3535 a9e0c397 Iustin Pop
      # since we *always* want to create this LV, we use the
3536 a9e0c397 Iustin Pop
      # _Create...OnPrimary (which forces the creation), even if we
3537 a9e0c397 Iustin Pop
      # are talking about the secondary node
3538 a9e0c397 Iustin Pop
      for new_lv in new_lvs:
3539 3f78eef2 Iustin Pop
        if not _CreateBlockDevOnPrimary(cfg, tgt_node, instance, new_lv,
3540 a9e0c397 Iustin Pop
                                        _GetInstanceInfoText(instance)):
3541 a9e0c397 Iustin Pop
          raise errors.OpExecError("Failed to create new LV named '%s' on"
3542 a9e0c397 Iustin Pop
                                   " node '%s'" %
3543 a9e0c397 Iustin Pop
                                   (new_lv.logical_id[1], tgt_node))
3544 a9e0c397 Iustin Pop
3545 cff90b79 Iustin Pop
    # Step: for each lv, detach+rename*2+attach
3546 5bfac263 Iustin Pop
    self.proc.LogStep(4, steps_total, "change drbd configuration")
3547 cff90b79 Iustin Pop
    for dev, old_lvs, new_lvs in iv_names.itervalues():
3548 cff90b79 Iustin Pop
      info("detaching %s drbd from local storage" % dev.iv_name)
3549 a9e0c397 Iustin Pop
      if not rpc.call_blockdev_removechildren(tgt_node, dev, old_lvs):
3550 a9e0c397 Iustin Pop
        raise errors.OpExecError("Can't detach drbd from local storage on node"
3551 a9e0c397 Iustin Pop
                                 " %s for device %s" % (tgt_node, dev.iv_name))
3552 cff90b79 Iustin Pop
      #dev.children = []
3553 cff90b79 Iustin Pop
      #cfg.Update(instance)
3554 a9e0c397 Iustin Pop
3555 a9e0c397 Iustin Pop
      # ok, we created the new LVs, so now we know we have the needed
3556 a9e0c397 Iustin Pop
      # storage; as such, we proceed on the target node to rename
3557 a9e0c397 Iustin Pop
      # old_lv to _old, and new_lv to old_lv; note that we rename LVs
3558 c99a3cc0 Manuel Franceschini
      # using the assumption that logical_id == physical_id (which in
3559 a9e0c397 Iustin Pop
      # turn is the unique_id on that node)
3560 cff90b79 Iustin Pop
3561 cff90b79 Iustin Pop
      # FIXME(iustin): use a better name for the replaced LVs
3562 a9e0c397 Iustin Pop
      temp_suffix = int(time.time())
3563 a9e0c397 Iustin Pop
      ren_fn = lambda d, suff: (d.physical_id[0],
3564 a9e0c397 Iustin Pop
                                d.physical_id[1] + "_replaced-%s" % suff)
3565 cff90b79 Iustin Pop
      # build the rename list based on what LVs exist on the node
3566 cff90b79 Iustin Pop
      rlist = []
3567 cff90b79 Iustin Pop
      for to_ren in old_lvs:
3568 cff90b79 Iustin Pop
        find_res = rpc.call_blockdev_find(tgt_node, to_ren)
3569 cff90b79 Iustin Pop
        if find_res is not None: # device exists
3570 cff90b79 Iustin Pop
          rlist.append((to_ren, ren_fn(to_ren, temp_suffix)))
3571 cff90b79 Iustin Pop
3572 cff90b79 Iustin Pop
      info("renaming the old LVs on the target node")
3573 a9e0c397 Iustin Pop
      if not rpc.call_blockdev_rename(tgt_node, rlist):
3574 cff90b79 Iustin Pop
        raise errors.OpExecError("Can't rename old LVs on node %s" % tgt_node)
3575 a9e0c397 Iustin Pop
      # now we rename the new LVs to the old LVs
3576 cff90b79 Iustin Pop
      info("renaming the new LVs on the target node")
3577 a9e0c397 Iustin Pop
      rlist = [(new, old.physical_id) for old, new in zip(old_lvs, new_lvs)]
3578 a9e0c397 Iustin Pop
      if not rpc.call_blockdev_rename(tgt_node, rlist):
3579 cff90b79 Iustin Pop
        raise errors.OpExecError("Can't rename new LVs on node %s" % tgt_node)
3580 cff90b79 Iustin Pop
3581 cff90b79 Iustin Pop
      for old, new in zip(old_lvs, new_lvs):
3582 cff90b79 Iustin Pop
        new.logical_id = old.logical_id
3583 cff90b79 Iustin Pop
        cfg.SetDiskID(new, tgt_node)
3584 a9e0c397 Iustin Pop
3585 cff90b79 Iustin Pop
      for disk in old_lvs:
3586 cff90b79 Iustin Pop
        disk.logical_id = ren_fn(disk, temp_suffix)
3587 cff90b79 Iustin Pop
        cfg.SetDiskID(disk, tgt_node)
3588 a9e0c397 Iustin Pop
3589 a9e0c397 Iustin Pop
      # now that the new lvs have the old name, we can add them to the device
3590 cff90b79 Iustin Pop
      info("adding new mirror component on %s" % tgt_node)
3591 a9e0c397 Iustin Pop
      if not rpc.call_blockdev_addchildren(tgt_node, dev, new_lvs):
3592 a9e0c397 Iustin Pop
        for new_lv in new_lvs:
3593 a9e0c397 Iustin Pop
          if not rpc.call_blockdev_remove(tgt_node, new_lv):
3594 79caa9ed Guido Trotter
            warning("Can't rollback device %s", hint="manually cleanup unused"
3595 cff90b79 Iustin Pop
                    " logical volumes")
3596 cff90b79 Iustin Pop
        raise errors.OpExecError("Can't add local storage to drbd")
3597 a9e0c397 Iustin Pop
3598 a9e0c397 Iustin Pop
      dev.children = new_lvs
3599 a9e0c397 Iustin Pop
      cfg.Update(instance)
3600 a9e0c397 Iustin Pop
3601 cff90b79 Iustin Pop
    # Step: wait for sync
3602 a9e0c397 Iustin Pop
3603 a9e0c397 Iustin Pop
    # this can fail as the old devices are degraded and _WaitForSync
3604 a9e0c397 Iustin Pop
    # does a combined result over all disks, so we don't check its
3605 a9e0c397 Iustin Pop
    # return value
3606 5bfac263 Iustin Pop
    self.proc.LogStep(5, steps_total, "sync devices")
3607 5bfac263 Iustin Pop
    _WaitForSync(cfg, instance, self.proc, unlock=True)
3608 a9e0c397 Iustin Pop
3609 a9e0c397 Iustin Pop
    # so check manually all the devices
3610 a9e0c397 Iustin Pop
    for name, (dev, old_lvs, new_lvs) in iv_names.iteritems():
3611 a9e0c397 Iustin Pop
      cfg.SetDiskID(dev, instance.primary_node)
3612 a9e0c397 Iustin Pop
      is_degr = rpc.call_blockdev_find(instance.primary_node, dev)[5]
3613 a9e0c397 Iustin Pop
      if is_degr:
3614 a9e0c397 Iustin Pop
        raise errors.OpExecError("DRBD device %s is degraded!" % name)
3615 a9e0c397 Iustin Pop
3616 cff90b79 Iustin Pop
    # Step: remove old storage
3617 5bfac263 Iustin Pop
    self.proc.LogStep(6, steps_total, "removing old storage")
3618 a9e0c397 Iustin Pop
    for name, (dev, old_lvs, new_lvs) in iv_names.iteritems():
3619 cff90b79 Iustin Pop
      info("remove logical volumes for %s" % name)
3620 a9e0c397 Iustin Pop
      for lv in old_lvs:
3621 a9e0c397 Iustin Pop
        cfg.SetDiskID(lv, tgt_node)
3622 a9e0c397 Iustin Pop
        if not rpc.call_blockdev_remove(tgt_node, lv):
3623 79caa9ed Guido Trotter
          warning("Can't remove old LV", hint="manually remove unused LVs")
3624 a9e0c397 Iustin Pop
          continue
3625 a9e0c397 Iustin Pop
3626 a9e0c397 Iustin Pop
  def _ExecD8Secondary(self, feedback_fn):
3627 a9e0c397 Iustin Pop
    """Replace the secondary node for drbd8.
3628 a9e0c397 Iustin Pop

3629 a9e0c397 Iustin Pop
    The algorithm for replace is quite complicated:
3630 a9e0c397 Iustin Pop
      - for all disks of the instance:
3631 a9e0c397 Iustin Pop
        - create new LVs on the new node with same names
3632 a9e0c397 Iustin Pop
        - shutdown the drbd device on the old secondary
3633 a9e0c397 Iustin Pop
        - disconnect the drbd network on the primary
3634 a9e0c397 Iustin Pop
        - create the drbd device on the new secondary
3635 a9e0c397 Iustin Pop
        - network attach the drbd on the primary, using an artifice:
3636 a9e0c397 Iustin Pop
          the drbd code for Attach() will connect to the network if it
3637 a9e0c397 Iustin Pop
          finds a device which is connected to the good local disks but
3638 a9e0c397 Iustin Pop
          not network enabled
3639 a9e0c397 Iustin Pop
      - wait for sync across all devices
3640 a9e0c397 Iustin Pop
      - remove all disks from the old secondary
3641 a9e0c397 Iustin Pop

3642 a9e0c397 Iustin Pop
    Failures are not very well handled.
3643 0834c866 Iustin Pop

3644 a9e0c397 Iustin Pop
    """
3645 0834c866 Iustin Pop
    steps_total = 6
3646 5bfac263 Iustin Pop
    warning, info = (self.proc.LogWarning, self.proc.LogInfo)
3647 a9e0c397 Iustin Pop
    instance = self.instance
3648 a9e0c397 Iustin Pop
    iv_names = {}
3649 a9e0c397 Iustin Pop
    vgname = self.cfg.GetVGName()
3650 a9e0c397 Iustin Pop
    # start of work
3651 a9e0c397 Iustin Pop
    cfg = self.cfg
3652 a9e0c397 Iustin Pop
    old_node = self.tgt_node
3653 a9e0c397 Iustin Pop
    new_node = self.new_node
3654 a9e0c397 Iustin Pop
    pri_node = instance.primary_node
3655 0834c866 Iustin Pop
3656 0834c866 Iustin Pop
    # Step: check device activation
3657 5bfac263 Iustin Pop
    self.proc.LogStep(1, steps_total, "check device existence")
3658 0834c866 Iustin Pop
    info("checking volume groups")
3659 0834c866 Iustin Pop
    my_vg = cfg.GetVGName()
3660 0834c866 Iustin Pop
    results = rpc.call_vg_list([pri_node, new_node])
3661 0834c866 Iustin Pop
    if not results:
3662 0834c866 Iustin Pop
      raise errors.OpExecError("Can't list volume groups on the nodes")
3663 0834c866 Iustin Pop
    for node in pri_node, new_node:
3664 0834c866 Iustin Pop
      res = results.get(node, False)
3665 0834c866 Iustin Pop
      if not res or my_vg not in res:
3666 0834c866 Iustin Pop
        raise errors.OpExecError("Volume group '%s' not found on %s" %
3667 0834c866 Iustin Pop
                                 (my_vg, node))
3668 0834c866 Iustin Pop
    for dev in instance.disks:
3669 0834c866 Iustin Pop
      if not dev.iv_name in self.op.disks:
3670 0834c866 Iustin Pop
        continue
3671 0834c866 Iustin Pop
      info("checking %s on %s" % (dev.iv_name, pri_node))
3672 0834c866 Iustin Pop
      cfg.SetDiskID(dev, pri_node)
3673 0834c866 Iustin Pop
      if not rpc.call_blockdev_find(pri_node, dev):
3674 0834c866 Iustin Pop
        raise errors.OpExecError("Can't find device %s on node %s" %
3675 0834c866 Iustin Pop
                                 (dev.iv_name, pri_node))
3676 0834c866 Iustin Pop
3677 0834c866 Iustin Pop
    # Step: check other node consistency
3678 5bfac263 Iustin Pop
    self.proc.LogStep(2, steps_total, "check peer consistency")
3679 0834c866 Iustin Pop
    for dev in instance.disks:
3680 0834c866 Iustin Pop
      if not dev.iv_name in self.op.disks:
3681 0834c866 Iustin Pop
        continue
3682 0834c866 Iustin Pop
      info("checking %s consistency on %s" % (dev.iv_name, pri_node))
3683 0834c866 Iustin Pop
      if not _CheckDiskConsistency(self.cfg, dev, pri_node, True, ldisk=True):
3684 0834c866 Iustin Pop
        raise errors.OpExecError("Primary node (%s) has degraded storage,"
3685 0834c866 Iustin Pop
                                 " unsafe to replace the secondary" %
3686 0834c866 Iustin Pop
                                 pri_node)
3687 0834c866 Iustin Pop
3688 0834c866 Iustin Pop
    # Step: create new storage
3689 5bfac263 Iustin Pop
    self.proc.LogStep(3, steps_total, "allocate new storage")
3690 a9e0c397 Iustin Pop
    for dev in instance.disks:
3691 a9e0c397 Iustin Pop
      size = dev.size
3692 0834c866 Iustin Pop
      info("adding new local storage on %s for %s" % (new_node, dev.iv_name))
3693 a9e0c397 Iustin Pop
      # since we *always* want to create this LV, we use the
3694 a9e0c397 Iustin Pop
      # _Create...OnPrimary (which forces the creation), even if we
3695 a9e0c397 Iustin Pop
      # are talking about the secondary node
3696 a9e0c397 Iustin Pop
      for new_lv in dev.children:
3697 3f78eef2 Iustin Pop
        if not _CreateBlockDevOnPrimary(cfg, new_node, instance, new_lv,
3698 a9e0c397 Iustin Pop
                                        _GetInstanceInfoText(instance)):
3699 a9e0c397 Iustin Pop
          raise errors.OpExecError("Failed to create new LV named '%s' on"
3700 a9e0c397 Iustin Pop
                                   " node '%s'" %
3701 a9e0c397 Iustin Pop
                                   (new_lv.logical_id[1], new_node))
3702 a9e0c397 Iustin Pop
3703 0834c866 Iustin Pop
      iv_names[dev.iv_name] = (dev, dev.children)
3704 0834c866 Iustin Pop
3705 5bfac263 Iustin Pop
    self.proc.LogStep(4, steps_total, "changing drbd configuration")
3706 0834c866 Iustin Pop
    for dev in instance.disks:
3707 0834c866 Iustin Pop
      size = dev.size
3708 0834c866 Iustin Pop
      info("activating a new drbd on %s for %s" % (new_node, dev.iv_name))
3709 a9e0c397 Iustin Pop
      # create new devices on new_node
3710 a9e0c397 Iustin Pop
      new_drbd = objects.Disk(dev_type=constants.LD_DRBD8,
3711 a9e0c397 Iustin Pop
                              logical_id=(pri_node, new_node,
3712 a9e0c397 Iustin Pop
                                          dev.logical_id[2]),
3713 a9e0c397 Iustin Pop
                              children=dev.children)
3714 3f78eef2 Iustin Pop
      if not _CreateBlockDevOnSecondary(cfg, new_node, instance,
3715 3f78eef2 Iustin Pop
                                        new_drbd, False,
3716 a9e0c397 Iustin Pop
                                      _GetInstanceInfoText(instance)):
3717 a9e0c397 Iustin Pop
        raise errors.OpExecError("Failed to create new DRBD on"
3718 a9e0c397 Iustin Pop
                                 " node '%s'" % new_node)
3719 a9e0c397 Iustin Pop
3720 0834c866 Iustin Pop
    for dev in instance.disks:
3721 a9e0c397 Iustin Pop
      # we have new devices, shutdown the drbd on the old secondary
3722 0834c866 Iustin Pop
      info("shutting down drbd for %s on old node" % dev.iv_name)
3723 a9e0c397 Iustin Pop
      cfg.SetDiskID(dev, old_node)
3724 a9e0c397 Iustin Pop
      if not rpc.call_blockdev_shutdown(old_node, dev):
3725 0834c866 Iustin Pop
        warning("Failed to shutdown drbd for %s on old node" % dev.iv_name,
3726 79caa9ed Guido Trotter
                hint="Please cleanup this device manually as soon as possible")
3727 a9e0c397 Iustin Pop
3728 642445d9 Iustin Pop
    info("detaching primary drbds from the network (=> standalone)")
3729 642445d9 Iustin Pop
    done = 0
3730 642445d9 Iustin Pop
    for dev in instance.disks:
3731 a9e0c397 Iustin Pop
      cfg.SetDiskID(dev, pri_node)
3732 642445d9 Iustin Pop
      # set the physical (unique in bdev terms) id to None, meaning
3733 642445d9 Iustin Pop
      # detach from network
3734 642445d9 Iustin Pop
      dev.physical_id = (None,) * len(dev.physical_id)
3735 642445d9 Iustin Pop
      # and 'find' the device, which will 'fix' it to match the
3736 642445d9 Iustin Pop
      # standalone state
3737 642445d9 Iustin Pop
      if rpc.call_blockdev_find(pri_node, dev):
3738 642445d9 Iustin Pop
        done += 1
3739 642445d9 Iustin Pop
      else:
3740 642445d9 Iustin Pop
        warning("Failed to detach drbd %s from network, unusual case" %
3741 642445d9 Iustin Pop
                dev.iv_name)
3742 642445d9 Iustin Pop
3743 642445d9 Iustin Pop
    if not done:
3744 642445d9 Iustin Pop
      # no detaches succeeded (very unlikely)
3745 642445d9 Iustin Pop
      raise errors.OpExecError("Can't detach at least one DRBD from old node")
3746 642445d9 Iustin Pop
3747 642445d9 Iustin Pop
    # if we managed to detach at least one, we update all the disks of
3748 642445d9 Iustin Pop
    # the instance to point to the new secondary
3749 642445d9 Iustin Pop
    info("updating instance configuration")
3750 642445d9 Iustin Pop
    for dev in instance.disks:
3751 642445d9 Iustin Pop
      dev.logical_id = (pri_node, new_node) + dev.logical_id[2:]
3752 a9e0c397 Iustin Pop
      cfg.SetDiskID(dev, pri_node)
3753 642445d9 Iustin Pop
    cfg.Update(instance)
3754 a9e0c397 Iustin Pop
3755 642445d9 Iustin Pop
    # and now perform the drbd attach
3756 642445d9 Iustin Pop
    info("attaching primary drbds to new secondary (standalone => connected)")
3757 642445d9 Iustin Pop
    failures = []
3758 642445d9 Iustin Pop
    for dev in instance.disks:
3759 642445d9 Iustin Pop
      info("attaching primary drbd for %s to new secondary node" % dev.iv_name)
3760 642445d9 Iustin Pop
      # since the attach is smart, it's enough to 'find' the device,
3761 642445d9 Iustin Pop
      # it will automatically activate the network, if the physical_id
3762 642445d9 Iustin Pop
      # is correct
3763 642445d9 Iustin Pop
      cfg.SetDiskID(dev, pri_node)
3764 642445d9 Iustin Pop
      if not rpc.call_blockdev_find(pri_node, dev):
3765 642445d9 Iustin Pop
        warning("can't attach drbd %s to new secondary!" % dev.iv_name,
3766 642445d9 Iustin Pop
                "please do a gnt-instance info to see the status of disks")
3767 a9e0c397 Iustin Pop
3768 a9e0c397 Iustin Pop
    # this can fail as the old devices are degraded and _WaitForSync
3769 a9e0c397 Iustin Pop
    # does a combined result over all disks, so we don't check its
3770 a9e0c397 Iustin Pop
    # return value
3771 5bfac263 Iustin Pop
    self.proc.LogStep(5, steps_total, "sync devices")
3772 5bfac263 Iustin Pop
    _WaitForSync(cfg, instance, self.proc, unlock=True)
3773 a9e0c397 Iustin Pop
3774 a9e0c397 Iustin Pop
    # so check manually all the devices
3775 a9e0c397 Iustin Pop
    for name, (dev, old_lvs) in iv_names.iteritems():
3776 a9e0c397 Iustin Pop
      cfg.SetDiskID(dev, pri_node)
3777 a9e0c397 Iustin Pop
      is_degr = rpc.call_blockdev_find(pri_node, dev)[5]
3778 a9e0c397 Iustin Pop
      if is_degr:
3779 a9e0c397 Iustin Pop
        raise errors.OpExecError("DRBD device %s is degraded!" % name)
3780 a9e0c397 Iustin Pop
3781 5bfac263 Iustin Pop
    self.proc.LogStep(6, steps_total, "removing old storage")
3782 a9e0c397 Iustin Pop
    for name, (dev, old_lvs) in iv_names.iteritems():
3783 0834c866 Iustin Pop
      info("remove logical volumes for %s" % name)
3784 a9e0c397 Iustin Pop
      for lv in old_lvs:
3785 a9e0c397 Iustin Pop
        cfg.SetDiskID(lv, old_node)
3786 a9e0c397 Iustin Pop
        if not rpc.call_blockdev_remove(old_node, lv):
3787 0834c866 Iustin Pop
          warning("Can't remove LV on old secondary",
3788 79caa9ed Guido Trotter
                  hint="Cleanup stale volumes by hand")
3789 a9e0c397 Iustin Pop
3790 a9e0c397 Iustin Pop
  def Exec(self, feedback_fn):
3791 a9e0c397 Iustin Pop
    """Execute disk replacement.
3792 a9e0c397 Iustin Pop

3793 a9e0c397 Iustin Pop
    This dispatches the disk replacement to the appropriate handler.
3794 a9e0c397 Iustin Pop

3795 a9e0c397 Iustin Pop
    """
3796 a9e0c397 Iustin Pop
    instance = self.instance
3797 22985314 Guido Trotter
3798 22985314 Guido Trotter
    # Activate the instance disks if we're replacing them on a down instance
3799 22985314 Guido Trotter
    if instance.status == "down":
3800 22985314 Guido Trotter
      op = opcodes.OpActivateInstanceDisks(instance_name=instance.name)
3801 22985314 Guido Trotter
      self.proc.ChainOpCode(op)
3802 22985314 Guido Trotter
3803 abdf0113 Iustin Pop
    if instance.disk_template == constants.DT_DRBD8:
3804 a9e0c397 Iustin Pop
      if self.op.remote_node is None:
3805 a9e0c397 Iustin Pop
        fn = self._ExecD8DiskOnly
3806 a9e0c397 Iustin Pop
      else:
3807 a9e0c397 Iustin Pop
        fn = self._ExecD8Secondary
3808 a9e0c397 Iustin Pop
    else:
3809 a9e0c397 Iustin Pop
      raise errors.ProgrammerError("Unhandled disk replacement case")
3810 22985314 Guido Trotter
3811 22985314 Guido Trotter
    ret = fn(feedback_fn)
3812 22985314 Guido Trotter
3813 22985314 Guido Trotter
    # Deactivate the instance disks if we're replacing them on a down instance
3814 22985314 Guido Trotter
    if instance.status == "down":
3815 22985314 Guido Trotter
      op = opcodes.OpDeactivateInstanceDisks(instance_name=instance.name)
3816 22985314 Guido Trotter
      self.proc.ChainOpCode(op)
3817 22985314 Guido Trotter
3818 22985314 Guido Trotter
    return ret
3819 a9e0c397 Iustin Pop
3820 a8083063 Iustin Pop
3821 8729e0d7 Iustin Pop
class LUGrowDisk(LogicalUnit):
3822 8729e0d7 Iustin Pop
  """Grow a disk of an instance.
3823 8729e0d7 Iustin Pop

3824 8729e0d7 Iustin Pop
  """
3825 8729e0d7 Iustin Pop
  HPATH = "disk-grow"
3826 8729e0d7 Iustin Pop
  HTYPE = constants.HTYPE_INSTANCE
3827 8729e0d7 Iustin Pop
  _OP_REQP = ["instance_name", "disk", "amount"]
3828 8729e0d7 Iustin Pop
3829 8729e0d7 Iustin Pop
  def BuildHooksEnv(self):
3830 8729e0d7 Iustin Pop
    """Build hooks env.
3831 8729e0d7 Iustin Pop

3832 8729e0d7 Iustin Pop
    This runs on the master, the primary and all the secondaries.
3833 8729e0d7 Iustin Pop

3834 8729e0d7 Iustin Pop
    """
3835 8729e0d7 Iustin Pop
    env = {
3836 8729e0d7 Iustin Pop
      "DISK": self.op.disk,
3837 8729e0d7 Iustin Pop
      "AMOUNT": self.op.amount,
3838 8729e0d7 Iustin Pop
      }
3839 8729e0d7 Iustin Pop
    env.update(_BuildInstanceHookEnvByObject(self.instance))
3840 8729e0d7 Iustin Pop
    nl = [
3841 8729e0d7 Iustin Pop
      self.sstore.GetMasterNode(),
3842 8729e0d7 Iustin Pop
      self.instance.primary_node,
3843 8729e0d7 Iustin Pop
      ]
3844 8729e0d7 Iustin Pop
    return env, nl, nl
3845 8729e0d7 Iustin Pop
3846 8729e0d7 Iustin Pop
  def CheckPrereq(self):
3847 8729e0d7 Iustin Pop
    """Check prerequisites.
3848 8729e0d7 Iustin Pop

3849 8729e0d7 Iustin Pop
    This checks that the instance is in the cluster.
3850 8729e0d7 Iustin Pop

3851 8729e0d7 Iustin Pop
    """
3852 8729e0d7 Iustin Pop
    instance = self.cfg.GetInstanceInfo(
3853 8729e0d7 Iustin Pop
      self.cfg.ExpandInstanceName(self.op.instance_name))
3854 8729e0d7 Iustin Pop
    if instance is None:
3855 8729e0d7 Iustin Pop
      raise errors.OpPrereqError("Instance '%s' not known" %
3856 8729e0d7 Iustin Pop
                                 self.op.instance_name)
3857 8729e0d7 Iustin Pop
    self.instance = instance
3858 8729e0d7 Iustin Pop
    self.op.instance_name = instance.name
3859 8729e0d7 Iustin Pop
3860 8729e0d7 Iustin Pop
    if instance.disk_template not in (constants.DT_PLAIN, constants.DT_DRBD8):
3861 8729e0d7 Iustin Pop
      raise errors.OpPrereqError("Instance's disk layout does not support"
3862 8729e0d7 Iustin Pop
                                 " growing.")
3863 8729e0d7 Iustin Pop
3864 8729e0d7 Iustin Pop
    if instance.FindDisk(self.op.disk) is None:
3865 8729e0d7 Iustin Pop
      raise errors.OpPrereqError("Disk '%s' not found for instance '%s'" %
3866 c7cdfc90 Iustin Pop
                                 (self.op.disk, instance.name))
3867 8729e0d7 Iustin Pop
3868 8729e0d7 Iustin Pop
    nodenames = [instance.primary_node] + list(instance.secondary_nodes)
3869 8729e0d7 Iustin Pop
    nodeinfo = rpc.call_node_info(nodenames, self.cfg.GetVGName())
3870 8729e0d7 Iustin Pop
    for node in nodenames:
3871 8729e0d7 Iustin Pop
      info = nodeinfo.get(node, None)
3872 8729e0d7 Iustin Pop
      if not info:
3873 8729e0d7 Iustin Pop
        raise errors.OpPrereqError("Cannot get current information"
3874 8729e0d7 Iustin Pop
                                   " from node '%s'" % node)
3875 8729e0d7 Iustin Pop
      vg_free = info.get('vg_free', None)
3876 8729e0d7 Iustin Pop
      if not isinstance(vg_free, int):
3877 8729e0d7 Iustin Pop
        raise errors.OpPrereqError("Can't compute free disk space on"
3878 8729e0d7 Iustin Pop
                                   " node %s" % node)
3879 8729e0d7 Iustin Pop
      if self.op.amount > info['vg_free']:
3880 8729e0d7 Iustin Pop
        raise errors.OpPrereqError("Not enough disk space on target node %s:"
3881 8729e0d7 Iustin Pop
                                   " %d MiB available, %d MiB required" %
3882 8729e0d7 Iustin Pop
                                   (node, info['vg_free'], self.op.amount))
3883 8729e0d7 Iustin Pop
3884 8729e0d7 Iustin Pop
  def Exec(self, feedback_fn):
3885 8729e0d7 Iustin Pop
    """Execute disk grow.
3886 8729e0d7 Iustin Pop

3887 8729e0d7 Iustin Pop
    """
3888 8729e0d7 Iustin Pop
    instance = self.instance
3889 8729e0d7 Iustin Pop
    disk = instance.FindDisk(self.op.disk)
3890 8729e0d7 Iustin Pop
    for node in (instance.secondary_nodes + (instance.primary_node,)):
3891 8729e0d7 Iustin Pop
      self.cfg.SetDiskID(disk, node)
3892 8729e0d7 Iustin Pop
      result = rpc.call_blockdev_grow(node, disk, self.op.amount)
3893 8729e0d7 Iustin Pop
      if not result or not isinstance(result, tuple) or len(result) != 2:
3894 8729e0d7 Iustin Pop
        raise errors.OpExecError("grow request failed to node %s" % node)
3895 8729e0d7 Iustin Pop
      elif not result[0]:
3896 8729e0d7 Iustin Pop
        raise errors.OpExecError("grow request failed to node %s: %s" %
3897 8729e0d7 Iustin Pop
                                 (node, result[1]))
3898 8729e0d7 Iustin Pop
    disk.RecordGrow(self.op.amount)
3899 8729e0d7 Iustin Pop
    self.cfg.Update(instance)
3900 8729e0d7 Iustin Pop
    return
3901 8729e0d7 Iustin Pop
3902 8729e0d7 Iustin Pop
3903 a8083063 Iustin Pop
class LUQueryInstanceData(NoHooksLU):
3904 a8083063 Iustin Pop
  """Query runtime instance data.
3905 a8083063 Iustin Pop

3906 a8083063 Iustin Pop
  """
3907 a8083063 Iustin Pop
  _OP_REQP = ["instances"]
3908 a8083063 Iustin Pop
3909 a8083063 Iustin Pop
  def CheckPrereq(self):
3910 a8083063 Iustin Pop
    """Check prerequisites.
3911 a8083063 Iustin Pop

3912 a8083063 Iustin Pop
    This only checks the optional instance list against the existing names.
3913 a8083063 Iustin Pop

3914 a8083063 Iustin Pop
    """
3915 a8083063 Iustin Pop
    if not isinstance(self.op.instances, list):
3916 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("Invalid argument type 'instances'")
3917 a8083063 Iustin Pop
    if self.op.instances:
3918 a8083063 Iustin Pop
      self.wanted_instances = []
3919 a8083063 Iustin Pop
      names = self.op.instances
3920 a8083063 Iustin Pop
      for name in names:
3921 a8083063 Iustin Pop
        instance = self.cfg.GetInstanceInfo(self.cfg.ExpandInstanceName(name))
3922 a8083063 Iustin Pop
        if instance is None:
3923 3ecf6786 Iustin Pop
          raise errors.OpPrereqError("No such instance name '%s'" % name)
3924 515207af Guido Trotter
        self.wanted_instances.append(instance)
3925 a8083063 Iustin Pop
    else:
3926 a8083063 Iustin Pop
      self.wanted_instances = [self.cfg.GetInstanceInfo(name) for name
3927 a8083063 Iustin Pop
                               in self.cfg.GetInstanceList()]
3928 a8083063 Iustin Pop
    return
3929 a8083063 Iustin Pop
3930 a8083063 Iustin Pop
3931 a8083063 Iustin Pop
  def _ComputeDiskStatus(self, instance, snode, dev):
3932 a8083063 Iustin Pop
    """Compute block device status.
3933 a8083063 Iustin Pop

3934 a8083063 Iustin Pop
    """
3935 a8083063 Iustin Pop
    self.cfg.SetDiskID(dev, instance.primary_node)
3936 a8083063 Iustin Pop
    dev_pstatus = rpc.call_blockdev_find(instance.primary_node, dev)
3937 a1f445d3 Iustin Pop
    if dev.dev_type in constants.LDS_DRBD:
3938 a8083063 Iustin Pop
      # we change the snode then (otherwise we use the one passed in)
3939 a8083063 Iustin Pop
      if dev.logical_id[0] == instance.primary_node:
3940 a8083063 Iustin Pop
        snode = dev.logical_id[1]
3941 a8083063 Iustin Pop
      else:
3942 a8083063 Iustin Pop
        snode = dev.logical_id[0]
3943 a8083063 Iustin Pop
3944 a8083063 Iustin Pop
    if snode:
3945 a8083063 Iustin Pop
      self.cfg.SetDiskID(dev, snode)
3946 a8083063 Iustin Pop
      dev_sstatus = rpc.call_blockdev_find(snode, dev)
3947 a8083063 Iustin Pop
    else:
3948 a8083063 Iustin Pop
      dev_sstatus = None
3949 a8083063 Iustin Pop
3950 a8083063 Iustin Pop
    if dev.children:
3951 a8083063 Iustin Pop
      dev_children = [self._ComputeDiskStatus(instance, snode, child)
3952 a8083063 Iustin Pop
                      for child in dev.children]
3953 a8083063 Iustin Pop
    else:
3954 a8083063 Iustin Pop
      dev_children = []
3955 a8083063 Iustin Pop
3956 a8083063 Iustin Pop
    data = {
3957 a8083063 Iustin Pop
      "iv_name": dev.iv_name,
3958 a8083063 Iustin Pop
      "dev_type": dev.dev_type,
3959 a8083063 Iustin Pop
      "logical_id": dev.logical_id,
3960 a8083063 Iustin Pop
      "physical_id": dev.physical_id,
3961 a8083063 Iustin Pop
      "pstatus": dev_pstatus,
3962 a8083063 Iustin Pop
      "sstatus": dev_sstatus,
3963 a8083063 Iustin Pop
      "children": dev_children,
3964 a8083063 Iustin Pop
      }
3965 a8083063 Iustin Pop
3966 a8083063 Iustin Pop
    return data
3967 a8083063 Iustin Pop
3968 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
3969 a8083063 Iustin Pop
    """Gather and return data"""
3970 a8083063 Iustin Pop
    result = {}
3971 a8083063 Iustin Pop
    for instance in self.wanted_instances:
3972 a8083063 Iustin Pop
      remote_info = rpc.call_instance_info(instance.primary_node,
3973 a8083063 Iustin Pop
                                                instance.name)
3974 a8083063 Iustin Pop
      if remote_info and "state" in remote_info:
3975 a8083063 Iustin Pop
        remote_state = "up"
3976 a8083063 Iustin Pop
      else:
3977 a8083063 Iustin Pop
        remote_state = "down"
3978 a8083063 Iustin Pop
      if instance.status == "down":
3979 a8083063 Iustin Pop
        config_state = "down"
3980 a8083063 Iustin Pop
      else:
3981 a8083063 Iustin Pop
        config_state = "up"
3982 a8083063 Iustin Pop
3983 a8083063 Iustin Pop
      disks = [self._ComputeDiskStatus(instance, None, device)
3984 a8083063 Iustin Pop
               for device in instance.disks]
3985 a8083063 Iustin Pop
3986 a8083063 Iustin Pop
      idict = {
3987 a8083063 Iustin Pop
        "name": instance.name,
3988 a8083063 Iustin Pop
        "config_state": config_state,
3989 a8083063 Iustin Pop
        "run_state": remote_state,
3990 a8083063 Iustin Pop
        "pnode": instance.primary_node,
3991 a8083063 Iustin Pop
        "snodes": instance.secondary_nodes,
3992 a8083063 Iustin Pop
        "os": instance.os,
3993 a8083063 Iustin Pop
        "memory": instance.memory,
3994 a8083063 Iustin Pop
        "nics": [(nic.mac, nic.ip, nic.bridge) for nic in instance.nics],
3995 a8083063 Iustin Pop
        "disks": disks,
3996 f55ff7ec Iustin Pop
        "vcpus": instance.vcpus,
3997 a8083063 Iustin Pop
        }
3998 a8083063 Iustin Pop
3999 a8340917 Iustin Pop
      htkind = self.sstore.GetHypervisorType()
4000 a8340917 Iustin Pop
      if htkind == constants.HT_XEN_PVM30:
4001 a8340917 Iustin Pop
        idict["kernel_path"] = instance.kernel_path
4002 a8340917 Iustin Pop
        idict["initrd_path"] = instance.initrd_path
4003 a8340917 Iustin Pop
4004 a8340917 Iustin Pop
      if htkind == constants.HT_XEN_HVM31:
4005 a8340917 Iustin Pop
        idict["hvm_boot_order"] = instance.hvm_boot_order
4006 a8340917 Iustin Pop
        idict["hvm_acpi"] = instance.hvm_acpi
4007 a8340917 Iustin Pop
        idict["hvm_pae"] = instance.hvm_pae
4008 a8340917 Iustin Pop
        idict["hvm_cdrom_image_path"] = instance.hvm_cdrom_image_path
4009 a8340917 Iustin Pop
4010 a8340917 Iustin Pop
      if htkind in constants.HTS_REQ_PORT:
4011 a8340917 Iustin Pop
        idict["vnc_bind_address"] = instance.vnc_bind_address
4012 a8340917 Iustin Pop
        idict["network_port"] = instance.network_port
4013 a8340917 Iustin Pop
4014 a8083063 Iustin Pop
      result[instance.name] = idict
4015 a8083063 Iustin Pop
4016 a8083063 Iustin Pop
    return result
4017 a8083063 Iustin Pop
4018 a8083063 Iustin Pop
4019 7767bbf5 Manuel Franceschini
class LUSetInstanceParams(LogicalUnit):
4020 a8083063 Iustin Pop
  """Modifies an instances's parameters.
4021 a8083063 Iustin Pop

4022 a8083063 Iustin Pop
  """
4023 a8083063 Iustin Pop
  HPATH = "instance-modify"
4024 a8083063 Iustin Pop
  HTYPE = constants.HTYPE_INSTANCE
4025 a8083063 Iustin Pop
  _OP_REQP = ["instance_name"]
4026 a8083063 Iustin Pop
4027 a8083063 Iustin Pop
  def BuildHooksEnv(self):
4028 a8083063 Iustin Pop
    """Build hooks env.
4029 a8083063 Iustin Pop

4030 a8083063 Iustin Pop
    This runs on the master, primary and secondaries.
4031 a8083063 Iustin Pop

4032 a8083063 Iustin Pop
    """
4033 396e1b78 Michael Hanselmann
    args = dict()
4034 a8083063 Iustin Pop
    if self.mem:
4035 396e1b78 Michael Hanselmann
      args['memory'] = self.mem
4036 a8083063 Iustin Pop
    if self.vcpus:
4037 396e1b78 Michael Hanselmann
      args['vcpus'] = self.vcpus
4038 ef756965 Iustin Pop
    if self.do_ip or self.do_bridge or self.mac:
4039 396e1b78 Michael Hanselmann
      if self.do_ip:
4040 396e1b78 Michael Hanselmann
        ip = self.ip
4041 396e1b78 Michael Hanselmann
      else:
4042 396e1b78 Michael Hanselmann
        ip = self.instance.nics[0].ip
4043 396e1b78 Michael Hanselmann
      if self.bridge:
4044 396e1b78 Michael Hanselmann
        bridge = self.bridge
4045 396e1b78 Michael Hanselmann
      else:
4046 396e1b78 Michael Hanselmann
        bridge = self.instance.nics[0].bridge
4047 ef756965 Iustin Pop
      if self.mac:
4048 ef756965 Iustin Pop
        mac = self.mac
4049 ef756965 Iustin Pop
      else:
4050 ef756965 Iustin Pop
        mac = self.instance.nics[0].mac
4051 ef756965 Iustin Pop
      args['nics'] = [(ip, bridge, mac)]
4052 396e1b78 Michael Hanselmann
    env = _BuildInstanceHookEnvByObject(self.instance, override=args)
4053 880478f8 Iustin Pop
    nl = [self.sstore.GetMasterNode(),
4054 a8083063 Iustin Pop
          self.instance.primary_node] + list(self.instance.secondary_nodes)
4055 a8083063 Iustin Pop
    return env, nl, nl
4056 a8083063 Iustin Pop
4057 a8083063 Iustin Pop
  def CheckPrereq(self):
4058 a8083063 Iustin Pop
    """Check prerequisites.
4059 a8083063 Iustin Pop

4060 a8083063 Iustin Pop
    This only checks the instance list against the existing names.
4061 a8083063 Iustin Pop

4062 a8083063 Iustin Pop
    """
4063 a8083063 Iustin Pop
    self.mem = getattr(self.op, "mem", None)
4064 a8083063 Iustin Pop
    self.vcpus = getattr(self.op, "vcpus", None)
4065 a8083063 Iustin Pop
    self.ip = getattr(self.op, "ip", None)
4066 1862d460 Alexander Schreiber
    self.mac = getattr(self.op, "mac", None)
4067 a8083063 Iustin Pop
    self.bridge = getattr(self.op, "bridge", None)
4068 973d7867 Iustin Pop
    self.kernel_path = getattr(self.op, "kernel_path", None)
4069 973d7867 Iustin Pop
    self.initrd_path = getattr(self.op, "initrd_path", None)
4070 25c5878d Alexander Schreiber
    self.hvm_boot_order = getattr(self.op, "hvm_boot_order", None)
4071 31a853d2 Iustin Pop
    self.hvm_acpi = getattr(self.op, "hvm_acpi", None)
4072 31a853d2 Iustin Pop
    self.hvm_pae = getattr(self.op, "hvm_pae", None)
4073 31a853d2 Iustin Pop
    self.hvm_cdrom_image_path = getattr(self.op, "hvm_cdrom_image_path", None)
4074 31a853d2 Iustin Pop
    self.vnc_bind_address = getattr(self.op, "vnc_bind_address", None)
4075 31a853d2 Iustin Pop
    all_parms = [self.mem, self.vcpus, self.ip, self.bridge, self.mac,
4076 31a853d2 Iustin Pop
                 self.kernel_path, self.initrd_path, self.hvm_boot_order,
4077 31a853d2 Iustin Pop
                 self.hvm_acpi, self.hvm_pae, self.hvm_cdrom_image_path,
4078 31a853d2 Iustin Pop
                 self.vnc_bind_address]
4079 31a853d2 Iustin Pop
    if all_parms.count(None) == len(all_parms):
4080 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("No changes submitted")
4081 a8083063 Iustin Pop
    if self.mem is not None:
4082 a8083063 Iustin Pop
      try:
4083 a8083063 Iustin Pop
        self.mem = int(self.mem)
4084 a8083063 Iustin Pop
      except ValueError, err:
4085 3ecf6786 Iustin Pop
        raise errors.OpPrereqError("Invalid memory size: %s" % str(err))
4086 a8083063 Iustin Pop
    if self.vcpus is not None:
4087 a8083063 Iustin Pop
      try:
4088 a8083063 Iustin Pop
        self.vcpus = int(self.vcpus)
4089 a8083063 Iustin Pop
      except ValueError, err:
4090 3ecf6786 Iustin Pop
        raise errors.OpPrereqError("Invalid vcpus number: %s" % str(err))
4091 a8083063 Iustin Pop
    if self.ip is not None:
4092 a8083063 Iustin Pop
      self.do_ip = True
4093 a8083063 Iustin Pop
      if self.ip.lower() == "none":
4094 a8083063 Iustin Pop
        self.ip = None
4095 a8083063 Iustin Pop
      else:
4096 a8083063 Iustin Pop
        if not utils.IsValidIP(self.ip):
4097 3ecf6786 Iustin Pop
          raise errors.OpPrereqError("Invalid IP address '%s'." % self.ip)
4098 a8083063 Iustin Pop
    else:
4099 a8083063 Iustin Pop
      self.do_ip = False
4100 ecb215b5 Michael Hanselmann
    self.do_bridge = (self.bridge is not None)
4101 1862d460 Alexander Schreiber
    if self.mac is not None:
4102 1862d460 Alexander Schreiber
      if self.cfg.IsMacInUse(self.mac):
4103 1862d460 Alexander Schreiber
        raise errors.OpPrereqError('MAC address %s already in use in cluster' %
4104 1862d460 Alexander Schreiber
                                   self.mac)
4105 1862d460 Alexander Schreiber
      if not utils.IsValidMac(self.mac):
4106 1862d460 Alexander Schreiber
        raise errors.OpPrereqError('Invalid MAC address %s' % self.mac)
4107 a8083063 Iustin Pop
4108 973d7867 Iustin Pop
    if self.kernel_path is not None:
4109 973d7867 Iustin Pop
      self.do_kernel_path = True
4110 973d7867 Iustin Pop
      if self.kernel_path == constants.VALUE_NONE:
4111 973d7867 Iustin Pop
        raise errors.OpPrereqError("Can't set instance to no kernel")
4112 973d7867 Iustin Pop
4113 973d7867 Iustin Pop
      if self.kernel_path != constants.VALUE_DEFAULT:
4114 973d7867 Iustin Pop
        if not os.path.isabs(self.kernel_path):
4115 ba4b62cf Iustin Pop
          raise errors.OpPrereqError("The kernel path must be an absolute"
4116 973d7867 Iustin Pop
                                    " filename")
4117 8cafeb26 Iustin Pop
    else:
4118 8cafeb26 Iustin Pop
      self.do_kernel_path = False
4119 973d7867 Iustin Pop
4120 973d7867 Iustin Pop
    if self.initrd_path is not None:
4121 973d7867 Iustin Pop
      self.do_initrd_path = True
4122 973d7867 Iustin Pop
      if self.initrd_path not in (constants.VALUE_NONE,
4123 973d7867 Iustin Pop
                                  constants.VALUE_DEFAULT):
4124 2bc22872 Iustin Pop
        if not os.path.isabs(self.initrd_path):
4125 ba4b62cf Iustin Pop
          raise errors.OpPrereqError("The initrd path must be an absolute"
4126 973d7867 Iustin Pop
                                    " filename")
4127 8cafeb26 Iustin Pop
    else:
4128 8cafeb26 Iustin Pop
      self.do_initrd_path = False
4129 973d7867 Iustin Pop
4130 25c5878d Alexander Schreiber
    # boot order verification
4131 25c5878d Alexander Schreiber
    if self.hvm_boot_order is not None:
4132 25c5878d Alexander Schreiber
      if self.hvm_boot_order != constants.VALUE_DEFAULT:
4133 25c5878d Alexander Schreiber
        if len(self.hvm_boot_order.strip("acdn")) != 0:
4134 25c5878d Alexander Schreiber
          raise errors.OpPrereqError("invalid boot order specified,"
4135 25c5878d Alexander Schreiber
                                     " must be one or more of [acdn]"
4136 25c5878d Alexander Schreiber
                                     " or 'default'")
4137 25c5878d Alexander Schreiber
4138 31a853d2 Iustin Pop
    # hvm_cdrom_image_path verification
4139 31a853d2 Iustin Pop
    if self.op.hvm_cdrom_image_path is not None:
4140 31a853d2 Iustin Pop
      if not os.path.isabs(self.op.hvm_cdrom_image_path):
4141 31a853d2 Iustin Pop
        raise errors.OpPrereqError("The path to the HVM CDROM image must"
4142 31a853d2 Iustin Pop
                                   " be an absolute path or None, not %s" %
4143 31a853d2 Iustin Pop
                                   self.op.hvm_cdrom_image_path)
4144 31a853d2 Iustin Pop
      if not os.path.isfile(self.op.hvm_cdrom_image_path):
4145 31a853d2 Iustin Pop
        raise errors.OpPrereqError("The HVM CDROM image must either be a"
4146 31a853d2 Iustin Pop
                                   " regular file or a symlink pointing to"
4147 31a853d2 Iustin Pop
                                   " an existing regular file, not %s" %
4148 31a853d2 Iustin Pop
                                   self.op.hvm_cdrom_image_path)
4149 31a853d2 Iustin Pop
4150 31a853d2 Iustin Pop
    # vnc_bind_address verification
4151 31a853d2 Iustin Pop
    if self.op.vnc_bind_address is not None:
4152 31a853d2 Iustin Pop
      if not utils.IsValidIP(self.op.vnc_bind_address):
4153 31a853d2 Iustin Pop
        raise errors.OpPrereqError("given VNC bind address '%s' doesn't look"
4154 31a853d2 Iustin Pop
                                   " like a valid IP address" %
4155 31a853d2 Iustin Pop
                                   self.op.vnc_bind_address)
4156 31a853d2 Iustin Pop
4157 a8083063 Iustin Pop
    instance = self.cfg.GetInstanceInfo(
4158 a8083063 Iustin Pop
      self.cfg.ExpandInstanceName(self.op.instance_name))
4159 a8083063 Iustin Pop
    if instance is None:
4160 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("No such instance name '%s'" %
4161 3ecf6786 Iustin Pop
                                 self.op.instance_name)
4162 a8083063 Iustin Pop
    self.op.instance_name = instance.name
4163 a8083063 Iustin Pop
    self.instance = instance
4164 a8083063 Iustin Pop
    return
4165 a8083063 Iustin Pop
4166 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
4167 a8083063 Iustin Pop
    """Modifies an instance.
4168 a8083063 Iustin Pop

4169 a8083063 Iustin Pop
    All parameters take effect only at the next restart of the instance.
4170 a8083063 Iustin Pop
    """
4171 a8083063 Iustin Pop
    result = []
4172 a8083063 Iustin Pop
    instance = self.instance
4173 a8083063 Iustin Pop
    if self.mem:
4174 a8083063 Iustin Pop
      instance.memory = self.mem
4175 a8083063 Iustin Pop
      result.append(("mem", self.mem))
4176 a8083063 Iustin Pop
    if self.vcpus:
4177 a8083063 Iustin Pop
      instance.vcpus = self.vcpus
4178 a8083063 Iustin Pop
      result.append(("vcpus",  self.vcpus))
4179 a8083063 Iustin Pop
    if self.do_ip:
4180 a8083063 Iustin Pop
      instance.nics[0].ip = self.ip
4181 a8083063 Iustin Pop
      result.append(("ip", self.ip))
4182 a8083063 Iustin Pop
    if self.bridge:
4183 a8083063 Iustin Pop
      instance.nics[0].bridge = self.bridge
4184 a8083063 Iustin Pop
      result.append(("bridge", self.bridge))
4185 1862d460 Alexander Schreiber
    if self.mac:
4186 1862d460 Alexander Schreiber
      instance.nics[0].mac = self.mac
4187 1862d460 Alexander Schreiber
      result.append(("mac", self.mac))
4188 973d7867 Iustin Pop
    if self.do_kernel_path:
4189 973d7867 Iustin Pop
      instance.kernel_path = self.kernel_path
4190 973d7867 Iustin Pop
      result.append(("kernel_path", self.kernel_path))
4191 973d7867 Iustin Pop
    if self.do_initrd_path:
4192 973d7867 Iustin Pop
      instance.initrd_path = self.initrd_path
4193 973d7867 Iustin Pop
      result.append(("initrd_path", self.initrd_path))
4194 25c5878d Alexander Schreiber
    if self.hvm_boot_order:
4195 25c5878d Alexander Schreiber
      if self.hvm_boot_order == constants.VALUE_DEFAULT:
4196 25c5878d Alexander Schreiber
        instance.hvm_boot_order = None
4197 25c5878d Alexander Schreiber
      else:
4198 25c5878d Alexander Schreiber
        instance.hvm_boot_order = self.hvm_boot_order
4199 25c5878d Alexander Schreiber
      result.append(("hvm_boot_order", self.hvm_boot_order))
4200 31a853d2 Iustin Pop
    if self.hvm_acpi:
4201 ec1ba002 Iustin Pop
      instance.hvm_acpi = self.hvm_acpi
4202 31a853d2 Iustin Pop
      result.append(("hvm_acpi", self.hvm_acpi))
4203 31a853d2 Iustin Pop
    if self.hvm_pae:
4204 ec1ba002 Iustin Pop
      instance.hvm_pae = self.hvm_pae
4205 31a853d2 Iustin Pop
      result.append(("hvm_pae", self.hvm_pae))
4206 31a853d2 Iustin Pop
    if self.hvm_cdrom_image_path:
4207 ec1ba002 Iustin Pop
      instance.hvm_cdrom_image_path = self.hvm_cdrom_image_path
4208 31a853d2 Iustin Pop
      result.append(("hvm_cdrom_image_path", self.hvm_cdrom_image_path))
4209 31a853d2 Iustin Pop
    if self.vnc_bind_address:
4210 31a853d2 Iustin Pop
      instance.vnc_bind_address = self.vnc_bind_address
4211 31a853d2 Iustin Pop
      result.append(("vnc_bind_address", self.vnc_bind_address))
4212 a8083063 Iustin Pop
4213 a8083063 Iustin Pop
    self.cfg.AddInstance(instance)
4214 a8083063 Iustin Pop
4215 a8083063 Iustin Pop
    return result
4216 a8083063 Iustin Pop
4217 a8083063 Iustin Pop
4218 a8083063 Iustin Pop
class LUQueryExports(NoHooksLU):
4219 a8083063 Iustin Pop
  """Query the exports list
4220 a8083063 Iustin Pop

4221 a8083063 Iustin Pop
  """
4222 a8083063 Iustin Pop
  _OP_REQP = []
4223 a8083063 Iustin Pop
4224 a8083063 Iustin Pop
  def CheckPrereq(self):
4225 a8083063 Iustin Pop
    """Check that the nodelist contains only existing nodes.
4226 a8083063 Iustin Pop

4227 a8083063 Iustin Pop
    """
4228 dcb93971 Michael Hanselmann
    self.nodes = _GetWantedNodes(self, getattr(self.op, "nodes", None))
4229 a8083063 Iustin Pop
4230 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
4231 a8083063 Iustin Pop
    """Compute the list of all the exported system images.
4232 a8083063 Iustin Pop

4233 a8083063 Iustin Pop
    Returns:
4234 a8083063 Iustin Pop
      a dictionary with the structure node->(export-list)
4235 a8083063 Iustin Pop
      where export-list is a list of the instances exported on
4236 a8083063 Iustin Pop
      that node.
4237 a8083063 Iustin Pop

4238 a8083063 Iustin Pop
    """
4239 a7ba5e53 Iustin Pop
    return rpc.call_export_list(self.nodes)
4240 a8083063 Iustin Pop
4241 a8083063 Iustin Pop
4242 a8083063 Iustin Pop
class LUExportInstance(LogicalUnit):
4243 a8083063 Iustin Pop
  """Export an instance to an image in the cluster.
4244 a8083063 Iustin Pop

4245 a8083063 Iustin Pop
  """
4246 a8083063 Iustin Pop
  HPATH = "instance-export"
4247 a8083063 Iustin Pop
  HTYPE = constants.HTYPE_INSTANCE
4248 a8083063 Iustin Pop
  _OP_REQP = ["instance_name", "target_node", "shutdown"]
4249 a8083063 Iustin Pop
4250 a8083063 Iustin Pop
  def BuildHooksEnv(self):
4251 a8083063 Iustin Pop
    """Build hooks env.
4252 a8083063 Iustin Pop

4253 a8083063 Iustin Pop
    This will run on the master, primary node and target node.
4254 a8083063 Iustin Pop

4255 a8083063 Iustin Pop
    """
4256 a8083063 Iustin Pop
    env = {
4257 a8083063 Iustin Pop
      "EXPORT_NODE": self.op.target_node,
4258 a8083063 Iustin Pop
      "EXPORT_DO_SHUTDOWN": self.op.shutdown,
4259 a8083063 Iustin Pop
      }
4260 396e1b78 Michael Hanselmann
    env.update(_BuildInstanceHookEnvByObject(self.instance))
4261 880478f8 Iustin Pop
    nl = [self.sstore.GetMasterNode(), self.instance.primary_node,
4262 a8083063 Iustin Pop
          self.op.target_node]
4263 a8083063 Iustin Pop
    return env, nl, nl
4264 a8083063 Iustin Pop
4265 a8083063 Iustin Pop
  def CheckPrereq(self):
4266 a8083063 Iustin Pop
    """Check prerequisites.
4267 a8083063 Iustin Pop

4268 9ac99fda Guido Trotter
    This checks that the instance and node names are valid.
4269 a8083063 Iustin Pop

4270 a8083063 Iustin Pop
    """
4271 a8083063 Iustin Pop
    instance_name = self.cfg.ExpandInstanceName(self.op.instance_name)
4272 a8083063 Iustin Pop
    self.instance = self.cfg.GetInstanceInfo(instance_name)
4273 a8083063 Iustin Pop
    if self.instance is None:
4274 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("Instance '%s' not found" %
4275 3ecf6786 Iustin Pop
                                 self.op.instance_name)
4276 a8083063 Iustin Pop
4277 a8083063 Iustin Pop
    # node verification
4278 a8083063 Iustin Pop
    dst_node_short = self.cfg.ExpandNodeName(self.op.target_node)
4279 a8083063 Iustin Pop
    self.dst_node = self.cfg.GetNodeInfo(dst_node_short)
4280 a8083063 Iustin Pop
4281 a8083063 Iustin Pop
    if self.dst_node is None:
4282 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("Destination node '%s' is unknown." %
4283 3ecf6786 Iustin Pop
                                 self.op.target_node)
4284 a8083063 Iustin Pop
    self.op.target_node = self.dst_node.name
4285 a8083063 Iustin Pop
4286 b6023d6c Manuel Franceschini
    # instance disk type verification
4287 b6023d6c Manuel Franceschini
    for disk in self.instance.disks:
4288 b6023d6c Manuel Franceschini
      if disk.dev_type == constants.LD_FILE:
4289 b6023d6c Manuel Franceschini
        raise errors.OpPrereqError("Export not supported for instances with"
4290 b6023d6c Manuel Franceschini
                                   " file-based disks")
4291 b6023d6c Manuel Franceschini
4292 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
4293 a8083063 Iustin Pop
    """Export an instance to an image in the cluster.
4294 a8083063 Iustin Pop

4295 a8083063 Iustin Pop
    """
4296 a8083063 Iustin Pop
    instance = self.instance
4297 a8083063 Iustin Pop
    dst_node = self.dst_node
4298 a8083063 Iustin Pop
    src_node = instance.primary_node
4299 a8083063 Iustin Pop
    if self.op.shutdown:
4300 fb300fb7 Guido Trotter
      # shutdown the instance, but not the disks
4301 fb300fb7 Guido Trotter
      if not rpc.call_instance_shutdown(src_node, instance):
4302 fb300fb7 Guido Trotter
         raise errors.OpExecError("Could not shutdown instance %s on node %s" %
4303 b4de68a9 Iustin Pop
                                  (instance.name, src_node))
4304 a8083063 Iustin Pop
4305 a8083063 Iustin Pop
    vgname = self.cfg.GetVGName()
4306 a8083063 Iustin Pop
4307 a8083063 Iustin Pop
    snap_disks = []
4308 a8083063 Iustin Pop
4309 a8083063 Iustin Pop
    try:
4310 a8083063 Iustin Pop
      for disk in instance.disks:
4311 a8083063 Iustin Pop
        if disk.iv_name == "sda":
4312 a8083063 Iustin Pop
          # new_dev_name will be a snapshot of an lvm leaf of the one we passed
4313 a8083063 Iustin Pop
          new_dev_name = rpc.call_blockdev_snapshot(src_node, disk)
4314 a8083063 Iustin Pop
4315 a8083063 Iustin Pop
          if not new_dev_name:
4316 a8083063 Iustin Pop
            logger.Error("could not snapshot block device %s on node %s" %
4317 a8083063 Iustin Pop
                         (disk.logical_id[1], src_node))
4318 a8083063 Iustin Pop
          else:
4319 fe96220b Iustin Pop
            new_dev = objects.Disk(dev_type=constants.LD_LV, size=disk.size,
4320 a8083063 Iustin Pop
                                      logical_id=(vgname, new_dev_name),
4321 a8083063 Iustin Pop
                                      physical_id=(vgname, new_dev_name),
4322 a8083063 Iustin Pop
                                      iv_name=disk.iv_name)
4323 a8083063 Iustin Pop
            snap_disks.append(new_dev)
4324 a8083063 Iustin Pop
4325 a8083063 Iustin Pop
    finally:
4326 fb300fb7 Guido Trotter
      if self.op.shutdown and instance.status == "up":
4327 fb300fb7 Guido Trotter
        if not rpc.call_instance_start(src_node, instance, None):
4328 fb300fb7 Guido Trotter
          _ShutdownInstanceDisks(instance, self.cfg)
4329 fb300fb7 Guido Trotter
          raise errors.OpExecError("Could not start instance")
4330 a8083063 Iustin Pop
4331 a8083063 Iustin Pop
    # TODO: check for size
4332 a8083063 Iustin Pop
4333 a8083063 Iustin Pop
    for dev in snap_disks:
4334 16687b98 Manuel Franceschini
      if not rpc.call_snapshot_export(src_node, dev, dst_node.name, instance):
4335 16687b98 Manuel Franceschini
        logger.Error("could not export block device %s from node %s to node %s"
4336 16687b98 Manuel Franceschini
                     % (dev.logical_id[1], src_node, dst_node.name))
4337 a8083063 Iustin Pop
      if not rpc.call_blockdev_remove(src_node, dev):
4338 16687b98 Manuel Franceschini
        logger.Error("could not remove snapshot block device %s from node %s" %
4339 16687b98 Manuel Franceschini
                     (dev.logical_id[1], src_node))
4340 a8083063 Iustin Pop
4341 a8083063 Iustin Pop
    if not rpc.call_finalize_export(dst_node.name, instance, snap_disks):
4342 a8083063 Iustin Pop
      logger.Error("could not finalize export for instance %s on node %s" %
4343 a8083063 Iustin Pop
                   (instance.name, dst_node.name))
4344 a8083063 Iustin Pop
4345 a8083063 Iustin Pop
    nodelist = self.cfg.GetNodeList()
4346 a8083063 Iustin Pop
    nodelist.remove(dst_node.name)
4347 a8083063 Iustin Pop
4348 a8083063 Iustin Pop
    # on one-node clusters nodelist will be empty after the removal
4349 a8083063 Iustin Pop
    # if we proceed the backup would be removed because OpQueryExports
4350 a8083063 Iustin Pop
    # substitutes an empty list with the full cluster node list.
4351 a8083063 Iustin Pop
    if nodelist:
4352 a8083063 Iustin Pop
      op = opcodes.OpQueryExports(nodes=nodelist)
4353 5bfac263 Iustin Pop
      exportlist = self.proc.ChainOpCode(op)
4354 a8083063 Iustin Pop
      for node in exportlist:
4355 a8083063 Iustin Pop
        if instance.name in exportlist[node]:
4356 a8083063 Iustin Pop
          if not rpc.call_export_remove(node, instance.name):
4357 a8083063 Iustin Pop
            logger.Error("could not remove older export for instance %s"
4358 a8083063 Iustin Pop
                         " on node %s" % (instance.name, node))
4359 5c947f38 Iustin Pop
4360 5c947f38 Iustin Pop
4361 9ac99fda Guido Trotter
class LURemoveExport(NoHooksLU):
4362 9ac99fda Guido Trotter
  """Remove exports related to the named instance.
4363 9ac99fda Guido Trotter

4364 9ac99fda Guido Trotter
  """
4365 9ac99fda Guido Trotter
  _OP_REQP = ["instance_name"]
4366 9ac99fda Guido Trotter
4367 9ac99fda Guido Trotter
  def CheckPrereq(self):
4368 9ac99fda Guido Trotter
    """Check prerequisites.
4369 9ac99fda Guido Trotter
    """
4370 9ac99fda Guido Trotter
    pass
4371 9ac99fda Guido Trotter
4372 9ac99fda Guido Trotter
  def Exec(self, feedback_fn):
4373 9ac99fda Guido Trotter
    """Remove any export.
4374 9ac99fda Guido Trotter

4375 9ac99fda Guido Trotter
    """
4376 9ac99fda Guido Trotter
    instance_name = self.cfg.ExpandInstanceName(self.op.instance_name)
4377 9ac99fda Guido Trotter
    # If the instance was not found we'll try with the name that was passed in.
4378 9ac99fda Guido Trotter
    # This will only work if it was an FQDN, though.
4379 9ac99fda Guido Trotter
    fqdn_warn = False
4380 9ac99fda Guido Trotter
    if not instance_name:
4381 9ac99fda Guido Trotter
      fqdn_warn = True
4382 9ac99fda Guido Trotter
      instance_name = self.op.instance_name
4383 9ac99fda Guido Trotter
4384 9ac99fda Guido Trotter
    op = opcodes.OpQueryExports(nodes=[])
4385 9ac99fda Guido Trotter
    exportlist = self.proc.ChainOpCode(op)
4386 9ac99fda Guido Trotter
    found = False
4387 9ac99fda Guido Trotter
    for node in exportlist:
4388 9ac99fda Guido Trotter
      if instance_name in exportlist[node]:
4389 9ac99fda Guido Trotter
        found = True
4390 9ac99fda Guido Trotter
        if not rpc.call_export_remove(node, instance_name):
4391 9ac99fda Guido Trotter
          logger.Error("could not remove export for instance %s"
4392 9ac99fda Guido Trotter
                       " on node %s" % (instance_name, node))
4393 9ac99fda Guido Trotter
4394 9ac99fda Guido Trotter
    if fqdn_warn and not found:
4395 9ac99fda Guido Trotter
      feedback_fn("Export not found. If trying to remove an export belonging"
4396 9ac99fda Guido Trotter
                  " to a deleted instance please use its Fully Qualified"
4397 9ac99fda Guido Trotter
                  " Domain Name.")
4398 9ac99fda Guido Trotter
4399 9ac99fda Guido Trotter
4400 5c947f38 Iustin Pop
class TagsLU(NoHooksLU):
4401 5c947f38 Iustin Pop
  """Generic tags LU.
4402 5c947f38 Iustin Pop

4403 5c947f38 Iustin Pop
  This is an abstract class which is the parent of all the other tags LUs.
4404 5c947f38 Iustin Pop

4405 5c947f38 Iustin Pop
  """
4406 5c947f38 Iustin Pop
  def CheckPrereq(self):
4407 5c947f38 Iustin Pop
    """Check prerequisites.
4408 5c947f38 Iustin Pop

4409 5c947f38 Iustin Pop
    """
4410 5c947f38 Iustin Pop
    if self.op.kind == constants.TAG_CLUSTER:
4411 5c947f38 Iustin Pop
      self.target = self.cfg.GetClusterInfo()
4412 5c947f38 Iustin Pop
    elif self.op.kind == constants.TAG_NODE:
4413 5c947f38 Iustin Pop
      name = self.cfg.ExpandNodeName(self.op.name)
4414 5c947f38 Iustin Pop
      if name is None:
4415 3ecf6786 Iustin Pop
        raise errors.OpPrereqError("Invalid node name (%s)" %
4416 3ecf6786 Iustin Pop
                                   (self.op.name,))
4417 5c947f38 Iustin Pop
      self.op.name = name
4418 5c947f38 Iustin Pop
      self.target = self.cfg.GetNodeInfo(name)
4419 5c947f38 Iustin Pop
    elif self.op.kind == constants.TAG_INSTANCE:
4420 8f684e16 Iustin Pop
      name = self.cfg.ExpandInstanceName(self.op.name)
4421 5c947f38 Iustin Pop
      if name is None:
4422 3ecf6786 Iustin Pop
        raise errors.OpPrereqError("Invalid instance name (%s)" %
4423 3ecf6786 Iustin Pop
                                   (self.op.name,))
4424 5c947f38 Iustin Pop
      self.op.name = name
4425 5c947f38 Iustin Pop
      self.target = self.cfg.GetInstanceInfo(name)
4426 5c947f38 Iustin Pop
    else:
4427 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("Wrong tag type requested (%s)" %
4428 3ecf6786 Iustin Pop
                                 str(self.op.kind))
4429 5c947f38 Iustin Pop
4430 5c947f38 Iustin Pop
4431 5c947f38 Iustin Pop
class LUGetTags(TagsLU):
4432 5c947f38 Iustin Pop
  """Returns the tags of a given object.
4433 5c947f38 Iustin Pop

4434 5c947f38 Iustin Pop
  """
4435 5c947f38 Iustin Pop
  _OP_REQP = ["kind", "name"]
4436 5c947f38 Iustin Pop
4437 5c947f38 Iustin Pop
  def Exec(self, feedback_fn):
4438 5c947f38 Iustin Pop
    """Returns the tag list.
4439 5c947f38 Iustin Pop

4440 5c947f38 Iustin Pop
    """
4441 5c947f38 Iustin Pop
    return self.target.GetTags()
4442 5c947f38 Iustin Pop
4443 5c947f38 Iustin Pop
4444 73415719 Iustin Pop
class LUSearchTags(NoHooksLU):
4445 73415719 Iustin Pop
  """Searches the tags for a given pattern.
4446 73415719 Iustin Pop

4447 73415719 Iustin Pop
  """
4448 73415719 Iustin Pop
  _OP_REQP = ["pattern"]
4449 73415719 Iustin Pop
4450 73415719 Iustin Pop
  def CheckPrereq(self):
4451 73415719 Iustin Pop
    """Check prerequisites.
4452 73415719 Iustin Pop

4453 73415719 Iustin Pop
    This checks the pattern passed for validity by compiling it.
4454 73415719 Iustin Pop

4455 73415719 Iustin Pop
    """
4456 73415719 Iustin Pop
    try:
4457 73415719 Iustin Pop
      self.re = re.compile(self.op.pattern)
4458 73415719 Iustin Pop
    except re.error, err:
4459 73415719 Iustin Pop
      raise errors.OpPrereqError("Invalid search pattern '%s': %s" %
4460 73415719 Iustin Pop
                                 (self.op.pattern, err))
4461 73415719 Iustin Pop
4462 73415719 Iustin Pop
  def Exec(self, feedback_fn):
4463 73415719 Iustin Pop
    """Returns the tag list.
4464 73415719 Iustin Pop

4465 73415719 Iustin Pop
    """
4466 73415719 Iustin Pop
    cfg = self.cfg
4467 73415719 Iustin Pop
    tgts = [("/cluster", cfg.GetClusterInfo())]
4468 73415719 Iustin Pop
    ilist = [cfg.GetInstanceInfo(name) for name in cfg.GetInstanceList()]
4469 73415719 Iustin Pop
    tgts.extend([("/instances/%s" % i.name, i) for i in ilist])
4470 73415719 Iustin Pop
    nlist = [cfg.GetNodeInfo(name) for name in cfg.GetNodeList()]
4471 73415719 Iustin Pop
    tgts.extend([("/nodes/%s" % n.name, n) for n in nlist])
4472 73415719 Iustin Pop
    results = []
4473 73415719 Iustin Pop
    for path, target in tgts:
4474 73415719 Iustin Pop
      for tag in target.GetTags():
4475 73415719 Iustin Pop
        if self.re.search(tag):
4476 73415719 Iustin Pop
          results.append((path, tag))
4477 73415719 Iustin Pop
    return results
4478 73415719 Iustin Pop
4479 73415719 Iustin Pop
4480 f27302fa Iustin Pop
class LUAddTags(TagsLU):
4481 5c947f38 Iustin Pop
  """Sets a tag on a given object.
4482 5c947f38 Iustin Pop

4483 5c947f38 Iustin Pop
  """
4484 f27302fa Iustin Pop
  _OP_REQP = ["kind", "name", "tags"]
4485 5c947f38 Iustin Pop
4486 5c947f38 Iustin Pop
  def CheckPrereq(self):
4487 5c947f38 Iustin Pop
    """Check prerequisites.
4488 5c947f38 Iustin Pop

4489 5c947f38 Iustin Pop
    This checks the type and length of the tag name and value.
4490 5c947f38 Iustin Pop

4491 5c947f38 Iustin Pop
    """
4492 5c947f38 Iustin Pop
    TagsLU.CheckPrereq(self)
4493 f27302fa Iustin Pop
    for tag in self.op.tags:
4494 f27302fa Iustin Pop
      objects.TaggableObject.ValidateTag(tag)
4495 5c947f38 Iustin Pop
4496 5c947f38 Iustin Pop
  def Exec(self, feedback_fn):
4497 5c947f38 Iustin Pop
    """Sets the tag.
4498 5c947f38 Iustin Pop

4499 5c947f38 Iustin Pop
    """
4500 5c947f38 Iustin Pop
    try:
4501 f27302fa Iustin Pop
      for tag in self.op.tags:
4502 f27302fa Iustin Pop
        self.target.AddTag(tag)
4503 5c947f38 Iustin Pop
    except errors.TagError, err:
4504 3ecf6786 Iustin Pop
      raise errors.OpExecError("Error while setting tag: %s" % str(err))
4505 5c947f38 Iustin Pop
    try:
4506 5c947f38 Iustin Pop
      self.cfg.Update(self.target)
4507 5c947f38 Iustin Pop
    except errors.ConfigurationError:
4508 3ecf6786 Iustin Pop
      raise errors.OpRetryError("There has been a modification to the"
4509 3ecf6786 Iustin Pop
                                " config file and the operation has been"
4510 3ecf6786 Iustin Pop
                                " aborted. Please retry.")
4511 5c947f38 Iustin Pop
4512 5c947f38 Iustin Pop
4513 f27302fa Iustin Pop
class LUDelTags(TagsLU):
4514 f27302fa Iustin Pop
  """Delete a list of tags from a given object.
4515 5c947f38 Iustin Pop

4516 5c947f38 Iustin Pop
  """
4517 f27302fa Iustin Pop
  _OP_REQP = ["kind", "name", "tags"]
4518 5c947f38 Iustin Pop
4519 5c947f38 Iustin Pop
  def CheckPrereq(self):
4520 5c947f38 Iustin Pop
    """Check prerequisites.
4521 5c947f38 Iustin Pop

4522 5c947f38 Iustin Pop
    This checks that we have the given tag.
4523 5c947f38 Iustin Pop

4524 5c947f38 Iustin Pop
    """
4525 5c947f38 Iustin Pop
    TagsLU.CheckPrereq(self)
4526 f27302fa Iustin Pop
    for tag in self.op.tags:
4527 f27302fa Iustin Pop
      objects.TaggableObject.ValidateTag(tag)
4528 f27302fa Iustin Pop
    del_tags = frozenset(self.op.tags)
4529 f27302fa Iustin Pop
    cur_tags = self.target.GetTags()
4530 f27302fa Iustin Pop
    if not del_tags <= cur_tags:
4531 f27302fa Iustin Pop
      diff_tags = del_tags - cur_tags
4532 f27302fa Iustin Pop
      diff_names = ["'%s'" % tag for tag in diff_tags]
4533 f27302fa Iustin Pop
      diff_names.sort()
4534 f27302fa Iustin Pop
      raise errors.OpPrereqError("Tag(s) %s not found" %
4535 f27302fa Iustin Pop
                                 (",".join(diff_names)))
4536 5c947f38 Iustin Pop
4537 5c947f38 Iustin Pop
  def Exec(self, feedback_fn):
4538 5c947f38 Iustin Pop
    """Remove the tag from the object.
4539 5c947f38 Iustin Pop

4540 5c947f38 Iustin Pop
    """
4541 f27302fa Iustin Pop
    for tag in self.op.tags:
4542 f27302fa Iustin Pop
      self.target.RemoveTag(tag)
4543 5c947f38 Iustin Pop
    try:
4544 5c947f38 Iustin Pop
      self.cfg.Update(self.target)
4545 5c947f38 Iustin Pop
    except errors.ConfigurationError:
4546 3ecf6786 Iustin Pop
      raise errors.OpRetryError("There has been a modification to the"
4547 3ecf6786 Iustin Pop
                                " config file and the operation has been"
4548 3ecf6786 Iustin Pop
                                " aborted. Please retry.")
4549 06009e27 Iustin Pop
4550 06009e27 Iustin Pop
class LUTestDelay(NoHooksLU):
4551 06009e27 Iustin Pop
  """Sleep for a specified amount of time.
4552 06009e27 Iustin Pop

4553 06009e27 Iustin Pop
  This LU sleeps on the master and/or nodes for a specified amoutn of
4554 06009e27 Iustin Pop
  time.
4555 06009e27 Iustin Pop

4556 06009e27 Iustin Pop
  """
4557 06009e27 Iustin Pop
  _OP_REQP = ["duration", "on_master", "on_nodes"]
4558 06009e27 Iustin Pop
4559 06009e27 Iustin Pop
  def CheckPrereq(self):
4560 06009e27 Iustin Pop
    """Check prerequisites.
4561 06009e27 Iustin Pop

4562 06009e27 Iustin Pop
    This checks that we have a good list of nodes and/or the duration
4563 06009e27 Iustin Pop
    is valid.
4564 06009e27 Iustin Pop

4565 06009e27 Iustin Pop
    """
4566 06009e27 Iustin Pop
4567 06009e27 Iustin Pop
    if self.op.on_nodes:
4568 06009e27 Iustin Pop
      self.op.on_nodes = _GetWantedNodes(self, self.op.on_nodes)
4569 06009e27 Iustin Pop
4570 06009e27 Iustin Pop
  def Exec(self, feedback_fn):
4571 06009e27 Iustin Pop
    """Do the actual sleep.
4572 06009e27 Iustin Pop

4573 06009e27 Iustin Pop
    """
4574 06009e27 Iustin Pop
    if self.op.on_master:
4575 06009e27 Iustin Pop
      if not utils.TestDelay(self.op.duration):
4576 06009e27 Iustin Pop
        raise errors.OpExecError("Error during master delay test")
4577 06009e27 Iustin Pop
    if self.op.on_nodes:
4578 06009e27 Iustin Pop
      result = rpc.call_test_delay(self.op.on_nodes, self.op.duration)
4579 06009e27 Iustin Pop
      if not result:
4580 06009e27 Iustin Pop
        raise errors.OpExecError("Complete failure from rpc call")
4581 06009e27 Iustin Pop
      for node, node_result in result.items():
4582 06009e27 Iustin Pop
        if not node_result:
4583 06009e27 Iustin Pop
          raise errors.OpExecError("Failure during rpc call to node %s,"
4584 06009e27 Iustin Pop
                                   " result: %s" % (node, node_result))
4585 d61df03e Iustin Pop
4586 d61df03e Iustin Pop
4587 d1c2dd75 Iustin Pop
class IAllocator(object):
4588 d1c2dd75 Iustin Pop
  """IAllocator framework.
4589 d61df03e Iustin Pop

4590 d1c2dd75 Iustin Pop
  An IAllocator instance has three sets of attributes:
4591 d1c2dd75 Iustin Pop
    - cfg/sstore that are needed to query the cluster
4592 d1c2dd75 Iustin Pop
    - input data (all members of the _KEYS class attribute are required)
4593 d1c2dd75 Iustin Pop
    - four buffer attributes (in|out_data|text), that represent the
4594 d1c2dd75 Iustin Pop
      input (to the external script) in text and data structure format,
4595 d1c2dd75 Iustin Pop
      and the output from it, again in two formats
4596 d1c2dd75 Iustin Pop
    - the result variables from the script (success, info, nodes) for
4597 d1c2dd75 Iustin Pop
      easy usage
4598 d61df03e Iustin Pop

4599 d61df03e Iustin Pop
  """
4600 29859cb7 Iustin Pop
  _ALLO_KEYS = [
4601 d1c2dd75 Iustin Pop
    "mem_size", "disks", "disk_template",
4602 d1c2dd75 Iustin Pop
    "os", "tags", "nics", "vcpus",
4603 d1c2dd75 Iustin Pop
    ]
4604 29859cb7 Iustin Pop
  _RELO_KEYS = [
4605 29859cb7 Iustin Pop
    "relocate_from",
4606 29859cb7 Iustin Pop
    ]
4607 d1c2dd75 Iustin Pop
4608 29859cb7 Iustin Pop
  def __init__(self, cfg, sstore, mode, name, **kwargs):
4609 d1c2dd75 Iustin Pop
    self.cfg = cfg
4610 d1c2dd75 Iustin Pop
    self.sstore = sstore
4611 d1c2dd75 Iustin Pop
    # init buffer variables
4612 d1c2dd75 Iustin Pop
    self.in_text = self.out_text = self.in_data = self.out_data = None
4613 d1c2dd75 Iustin Pop
    # init all input fields so that pylint is happy
4614 29859cb7 Iustin Pop
    self.mode = mode
4615 29859cb7 Iustin Pop
    self.name = name
4616 d1c2dd75 Iustin Pop
    self.mem_size = self.disks = self.disk_template = None
4617 d1c2dd75 Iustin Pop
    self.os = self.tags = self.nics = self.vcpus = None
4618 29859cb7 Iustin Pop
    self.relocate_from = None
4619 27579978 Iustin Pop
    # computed fields
4620 27579978 Iustin Pop
    self.required_nodes = None
4621 d1c2dd75 Iustin Pop
    # init result fields
4622 d1c2dd75 Iustin Pop
    self.success = self.info = self.nodes = None
4623 29859cb7 Iustin Pop
    if self.mode == constants.IALLOCATOR_MODE_ALLOC:
4624 29859cb7 Iustin Pop
      keyset = self._ALLO_KEYS
4625 29859cb7 Iustin Pop
    elif self.mode == constants.IALLOCATOR_MODE_RELOC:
4626 29859cb7 Iustin Pop
      keyset = self._RELO_KEYS
4627 29859cb7 Iustin Pop
    else:
4628 29859cb7 Iustin Pop
      raise errors.ProgrammerError("Unknown mode '%s' passed to the"
4629 29859cb7 Iustin Pop
                                   " IAllocator" % self.mode)
4630 d1c2dd75 Iustin Pop
    for key in kwargs:
4631 29859cb7 Iustin Pop
      if key not in keyset:
4632 d1c2dd75 Iustin Pop
        raise errors.ProgrammerError("Invalid input parameter '%s' to"
4633 d1c2dd75 Iustin Pop
                                     " IAllocator" % key)
4634 d1c2dd75 Iustin Pop
      setattr(self, key, kwargs[key])
4635 29859cb7 Iustin Pop
    for key in keyset:
4636 d1c2dd75 Iustin Pop
      if key not in kwargs:
4637 d1c2dd75 Iustin Pop
        raise errors.ProgrammerError("Missing input parameter '%s' to"
4638 d1c2dd75 Iustin Pop
                                     " IAllocator" % key)
4639 d1c2dd75 Iustin Pop
    self._BuildInputData()
4640 d1c2dd75 Iustin Pop
4641 d1c2dd75 Iustin Pop
  def _ComputeClusterData(self):
4642 d1c2dd75 Iustin Pop
    """Compute the generic allocator input data.
4643 d1c2dd75 Iustin Pop

4644 d1c2dd75 Iustin Pop
    This is the data that is independent of the actual operation.
4645 d1c2dd75 Iustin Pop

4646 d1c2dd75 Iustin Pop
    """
4647 d1c2dd75 Iustin Pop
    cfg = self.cfg
4648 d1c2dd75 Iustin Pop
    # cluster data
4649 d1c2dd75 Iustin Pop
    data = {
4650 d1c2dd75 Iustin Pop
      "version": 1,
4651 d1c2dd75 Iustin Pop
      "cluster_name": self.sstore.GetClusterName(),
4652 d1c2dd75 Iustin Pop
      "cluster_tags": list(cfg.GetClusterInfo().GetTags()),
4653 6286519f Iustin Pop
      "hypervisor_type": self.sstore.GetHypervisorType(),
4654 d1c2dd75 Iustin Pop
      # we don't have job IDs
4655 d61df03e Iustin Pop
      }
4656 d61df03e Iustin Pop
4657 6286519f Iustin Pop
    i_list = [cfg.GetInstanceInfo(iname) for iname in cfg.GetInstanceList()]
4658 6286519f Iustin Pop
4659 d1c2dd75 Iustin Pop
    # node data
4660 d1c2dd75 Iustin Pop
    node_results = {}
4661 d1c2dd75 Iustin Pop
    node_list = cfg.GetNodeList()
4662 d1c2dd75 Iustin Pop
    node_data = rpc.call_node_info(node_list, cfg.GetVGName())
4663 d1c2dd75 Iustin Pop
    for nname in node_list:
4664 d1c2dd75 Iustin Pop
      ninfo = cfg.GetNodeInfo(nname)
4665 d1c2dd75 Iustin Pop
      if nname not in node_data or not isinstance(node_data[nname], dict):
4666 d1c2dd75 Iustin Pop
        raise errors.OpExecError("Can't get data for node %s" % nname)
4667 d1c2dd75 Iustin Pop
      remote_info = node_data[nname]
4668 b2662e7f Iustin Pop
      for attr in ['memory_total', 'memory_free', 'memory_dom0',
4669 4337cf1b Iustin Pop
                   'vg_size', 'vg_free', 'cpu_total']:
4670 d1c2dd75 Iustin Pop
        if attr not in remote_info:
4671 d1c2dd75 Iustin Pop
          raise errors.OpExecError("Node '%s' didn't return attribute '%s'" %
4672 d1c2dd75 Iustin Pop
                                   (nname, attr))
4673 d1c2dd75 Iustin Pop
        try:
4674 b2662e7f Iustin Pop
          remote_info[attr] = int(remote_info[attr])
4675 d1c2dd75 Iustin Pop
        except ValueError, err:
4676 d1c2dd75 Iustin Pop
          raise errors.OpExecError("Node '%s' returned invalid value for '%s':"
4677 d1c2dd75 Iustin Pop
                                   " %s" % (nname, attr, str(err)))
4678 6286519f Iustin Pop
      # compute memory used by primary instances
4679 6286519f Iustin Pop
      i_p_mem = i_p_up_mem = 0
4680 6286519f Iustin Pop
      for iinfo in i_list:
4681 6286519f Iustin Pop
        if iinfo.primary_node == nname:
4682 6286519f Iustin Pop
          i_p_mem += iinfo.memory
4683 6286519f Iustin Pop
          if iinfo.status == "up":
4684 6286519f Iustin Pop
            i_p_up_mem += iinfo.memory
4685 6286519f Iustin Pop
4686 b2662e7f Iustin Pop
      # compute memory used by instances
4687 d1c2dd75 Iustin Pop
      pnr = {
4688 d1c2dd75 Iustin Pop
        "tags": list(ninfo.GetTags()),
4689 b2662e7f Iustin Pop
        "total_memory": remote_info['memory_total'],
4690 b2662e7f Iustin Pop
        "reserved_memory": remote_info['memory_dom0'],
4691 b2662e7f Iustin Pop
        "free_memory": remote_info['memory_free'],
4692 6286519f Iustin Pop
        "i_pri_memory": i_p_mem,
4693 6286519f Iustin Pop
        "i_pri_up_memory": i_p_up_mem,
4694 b2662e7f Iustin Pop
        "total_disk": remote_info['vg_size'],
4695 b2662e7f Iustin Pop
        "free_disk": remote_info['vg_free'],
4696 d1c2dd75 Iustin Pop
        "primary_ip": ninfo.primary_ip,
4697 d1c2dd75 Iustin Pop
        "secondary_ip": ninfo.secondary_ip,
4698 4337cf1b Iustin Pop
        "total_cpus": remote_info['cpu_total'],
4699 d1c2dd75 Iustin Pop
        }
4700 d1c2dd75 Iustin Pop
      node_results[nname] = pnr
4701 d1c2dd75 Iustin Pop
    data["nodes"] = node_results
4702 d1c2dd75 Iustin Pop
4703 d1c2dd75 Iustin Pop
    # instance data
4704 d1c2dd75 Iustin Pop
    instance_data = {}
4705 6286519f Iustin Pop
    for iinfo in i_list:
4706 d1c2dd75 Iustin Pop
      nic_data = [{"mac": n.mac, "ip": n.ip, "bridge": n.bridge}
4707 d1c2dd75 Iustin Pop
                  for n in iinfo.nics]
4708 d1c2dd75 Iustin Pop
      pir = {
4709 d1c2dd75 Iustin Pop
        "tags": list(iinfo.GetTags()),
4710 d1c2dd75 Iustin Pop
        "should_run": iinfo.status == "up",
4711 d1c2dd75 Iustin Pop
        "vcpus": iinfo.vcpus,
4712 d1c2dd75 Iustin Pop
        "memory": iinfo.memory,
4713 d1c2dd75 Iustin Pop
        "os": iinfo.os,
4714 d1c2dd75 Iustin Pop
        "nodes": [iinfo.primary_node] + list(iinfo.secondary_nodes),
4715 d1c2dd75 Iustin Pop
        "nics": nic_data,
4716 d1c2dd75 Iustin Pop
        "disks": [{"size": dsk.size, "mode": "w"} for dsk in iinfo.disks],
4717 d1c2dd75 Iustin Pop
        "disk_template": iinfo.disk_template,
4718 d1c2dd75 Iustin Pop
        }
4719 768f0a80 Iustin Pop
      instance_data[iinfo.name] = pir
4720 d61df03e Iustin Pop
4721 d1c2dd75 Iustin Pop
    data["instances"] = instance_data
4722 d61df03e Iustin Pop
4723 d1c2dd75 Iustin Pop
    self.in_data = data
4724 d61df03e Iustin Pop
4725 d1c2dd75 Iustin Pop
  def _AddNewInstance(self):
4726 d1c2dd75 Iustin Pop
    """Add new instance data to allocator structure.
4727 d61df03e Iustin Pop

4728 d1c2dd75 Iustin Pop
    This in combination with _AllocatorGetClusterData will create the
4729 d1c2dd75 Iustin Pop
    correct structure needed as input for the allocator.
4730 d61df03e Iustin Pop

4731 d1c2dd75 Iustin Pop
    The checks for the completeness of the opcode must have already been
4732 d1c2dd75 Iustin Pop
    done.
4733 d61df03e Iustin Pop

4734 d1c2dd75 Iustin Pop
    """
4735 d1c2dd75 Iustin Pop
    data = self.in_data
4736 d1c2dd75 Iustin Pop
    if len(self.disks) != 2:
4737 d1c2dd75 Iustin Pop
      raise errors.OpExecError("Only two-disk configurations supported")
4738 d1c2dd75 Iustin Pop
4739 d1c2dd75 Iustin Pop
    disk_space = _ComputeDiskSize(self.disk_template,
4740 d1c2dd75 Iustin Pop
                                  self.disks[0]["size"], self.disks[1]["size"])
4741 d1c2dd75 Iustin Pop
4742 27579978 Iustin Pop
    if self.disk_template in constants.DTS_NET_MIRROR:
4743 27579978 Iustin Pop
      self.required_nodes = 2
4744 27579978 Iustin Pop
    else:
4745 27579978 Iustin Pop
      self.required_nodes = 1
4746 d1c2dd75 Iustin Pop
    request = {
4747 d1c2dd75 Iustin Pop
      "type": "allocate",
4748 d1c2dd75 Iustin Pop
      "name": self.name,
4749 d1c2dd75 Iustin Pop
      "disk_template": self.disk_template,
4750 d1c2dd75 Iustin Pop
      "tags": self.tags,
4751 d1c2dd75 Iustin Pop
      "os": self.os,
4752 d1c2dd75 Iustin Pop
      "vcpus": self.vcpus,
4753 d1c2dd75 Iustin Pop
      "memory": self.mem_size,
4754 d1c2dd75 Iustin Pop
      "disks": self.disks,
4755 d1c2dd75 Iustin Pop
      "disk_space_total": disk_space,
4756 d1c2dd75 Iustin Pop
      "nics": self.nics,
4757 27579978 Iustin Pop
      "required_nodes": self.required_nodes,
4758 d1c2dd75 Iustin Pop
      }
4759 d1c2dd75 Iustin Pop
    data["request"] = request
4760 298fe380 Iustin Pop
4761 d1c2dd75 Iustin Pop
  def _AddRelocateInstance(self):
4762 d1c2dd75 Iustin Pop
    """Add relocate instance data to allocator structure.
4763 298fe380 Iustin Pop

4764 d1c2dd75 Iustin Pop
    This in combination with _IAllocatorGetClusterData will create the
4765 d1c2dd75 Iustin Pop
    correct structure needed as input for the allocator.
4766 d61df03e Iustin Pop

4767 d1c2dd75 Iustin Pop
    The checks for the completeness of the opcode must have already been
4768 d1c2dd75 Iustin Pop
    done.
4769 d61df03e Iustin Pop

4770 d1c2dd75 Iustin Pop
    """
4771 27579978 Iustin Pop
    instance = self.cfg.GetInstanceInfo(self.name)
4772 27579978 Iustin Pop
    if instance is None:
4773 27579978 Iustin Pop
      raise errors.ProgrammerError("Unknown instance '%s' passed to"
4774 27579978 Iustin Pop
                                   " IAllocator" % self.name)
4775 27579978 Iustin Pop
4776 27579978 Iustin Pop
    if instance.disk_template not in constants.DTS_NET_MIRROR:
4777 27579978 Iustin Pop
      raise errors.OpPrereqError("Can't relocate non-mirrored instances")
4778 27579978 Iustin Pop
4779 2a139bb0 Iustin Pop
    if len(instance.secondary_nodes) != 1:
4780 2a139bb0 Iustin Pop
      raise errors.OpPrereqError("Instance has not exactly one secondary node")
4781 2a139bb0 Iustin Pop
4782 27579978 Iustin Pop
    self.required_nodes = 1
4783 27579978 Iustin Pop
4784 27579978 Iustin Pop
    disk_space = _ComputeDiskSize(instance.disk_template,
4785 27579978 Iustin Pop
                                  instance.disks[0].size,
4786 27579978 Iustin Pop
                                  instance.disks[1].size)
4787 27579978 Iustin Pop
4788 d1c2dd75 Iustin Pop
    request = {
4789 2a139bb0 Iustin Pop
      "type": "relocate",
4790 d1c2dd75 Iustin Pop
      "name": self.name,
4791 27579978 Iustin Pop
      "disk_space_total": disk_space,
4792 27579978 Iustin Pop
      "required_nodes": self.required_nodes,
4793 29859cb7 Iustin Pop
      "relocate_from": self.relocate_from,
4794 d1c2dd75 Iustin Pop
      }
4795 27579978 Iustin Pop
    self.in_data["request"] = request
4796 d61df03e Iustin Pop
4797 d1c2dd75 Iustin Pop
  def _BuildInputData(self):
4798 d1c2dd75 Iustin Pop
    """Build input data structures.
4799 d61df03e Iustin Pop

4800 d1c2dd75 Iustin Pop
    """
4801 d1c2dd75 Iustin Pop
    self._ComputeClusterData()
4802 d61df03e Iustin Pop
4803 d1c2dd75 Iustin Pop
    if self.mode == constants.IALLOCATOR_MODE_ALLOC:
4804 d1c2dd75 Iustin Pop
      self._AddNewInstance()
4805 d1c2dd75 Iustin Pop
    else:
4806 d1c2dd75 Iustin Pop
      self._AddRelocateInstance()
4807 d61df03e Iustin Pop
4808 d1c2dd75 Iustin Pop
    self.in_text = serializer.Dump(self.in_data)
4809 d61df03e Iustin Pop
4810 8d528b7c Iustin Pop
  def Run(self, name, validate=True, call_fn=rpc.call_iallocator_runner):
4811 d1c2dd75 Iustin Pop
    """Run an instance allocator and return the results.
4812 298fe380 Iustin Pop

4813 d1c2dd75 Iustin Pop
    """
4814 d1c2dd75 Iustin Pop
    data = self.in_text
4815 298fe380 Iustin Pop
4816 8d528b7c Iustin Pop
    result = call_fn(self.sstore.GetMasterNode(), name, self.in_text)
4817 298fe380 Iustin Pop
4818 8d528b7c Iustin Pop
    if not isinstance(result, tuple) or len(result) != 4:
4819 8d528b7c Iustin Pop
      raise errors.OpExecError("Invalid result from master iallocator runner")
4820 8d528b7c Iustin Pop
4821 8d528b7c Iustin Pop
    rcode, stdout, stderr, fail = result
4822 8d528b7c Iustin Pop
4823 8d528b7c Iustin Pop
    if rcode == constants.IARUN_NOTFOUND:
4824 8d528b7c Iustin Pop
      raise errors.OpExecError("Can't find allocator '%s'" % name)
4825 8d528b7c Iustin Pop
    elif rcode == constants.IARUN_FAILURE:
4826 d1c2dd75 Iustin Pop
        raise errors.OpExecError("Instance allocator call failed: %s,"
4827 d1c2dd75 Iustin Pop
                                 " output: %s" %
4828 8d528b7c Iustin Pop
                                 (fail, stdout+stderr))
4829 8d528b7c Iustin Pop
    self.out_text = stdout
4830 d1c2dd75 Iustin Pop
    if validate:
4831 d1c2dd75 Iustin Pop
      self._ValidateResult()
4832 298fe380 Iustin Pop
4833 d1c2dd75 Iustin Pop
  def _ValidateResult(self):
4834 d1c2dd75 Iustin Pop
    """Process the allocator results.
4835 538475ca Iustin Pop

4836 d1c2dd75 Iustin Pop
    This will process and if successful save the result in
4837 d1c2dd75 Iustin Pop
    self.out_data and the other parameters.
4838 538475ca Iustin Pop

4839 d1c2dd75 Iustin Pop
    """
4840 d1c2dd75 Iustin Pop
    try:
4841 d1c2dd75 Iustin Pop
      rdict = serializer.Load(self.out_text)
4842 d1c2dd75 Iustin Pop
    except Exception, err:
4843 d1c2dd75 Iustin Pop
      raise errors.OpExecError("Can't parse iallocator results: %s" % str(err))
4844 d1c2dd75 Iustin Pop
4845 d1c2dd75 Iustin Pop
    if not isinstance(rdict, dict):
4846 d1c2dd75 Iustin Pop
      raise errors.OpExecError("Can't parse iallocator results: not a dict")
4847 538475ca Iustin Pop
4848 d1c2dd75 Iustin Pop
    for key in "success", "info", "nodes":
4849 d1c2dd75 Iustin Pop
      if key not in rdict:
4850 d1c2dd75 Iustin Pop
        raise errors.OpExecError("Can't parse iallocator results:"
4851 d1c2dd75 Iustin Pop
                                 " missing key '%s'" % key)
4852 d1c2dd75 Iustin Pop
      setattr(self, key, rdict[key])
4853 538475ca Iustin Pop
4854 d1c2dd75 Iustin Pop
    if not isinstance(rdict["nodes"], list):
4855 d1c2dd75 Iustin Pop
      raise errors.OpExecError("Can't parse iallocator results: 'nodes' key"
4856 d1c2dd75 Iustin Pop
                               " is not a list")
4857 d1c2dd75 Iustin Pop
    self.out_data = rdict
4858 538475ca Iustin Pop
4859 538475ca Iustin Pop
4860 d61df03e Iustin Pop
class LUTestAllocator(NoHooksLU):
4861 d61df03e Iustin Pop
  """Run allocator tests.
4862 d61df03e Iustin Pop

4863 d61df03e Iustin Pop
  This LU runs the allocator tests
4864 d61df03e Iustin Pop

4865 d61df03e Iustin Pop
  """
4866 d61df03e Iustin Pop
  _OP_REQP = ["direction", "mode", "name"]
4867 d61df03e Iustin Pop
4868 d61df03e Iustin Pop
  def CheckPrereq(self):
4869 d61df03e Iustin Pop
    """Check prerequisites.
4870 d61df03e Iustin Pop

4871 d61df03e Iustin Pop
    This checks the opcode parameters depending on the director and mode test.
4872 d61df03e Iustin Pop

4873 d61df03e Iustin Pop
    """
4874 298fe380 Iustin Pop
    if self.op.mode == constants.IALLOCATOR_MODE_ALLOC:
4875 d61df03e Iustin Pop
      for attr in ["name", "mem_size", "disks", "disk_template",
4876 d61df03e Iustin Pop
                   "os", "tags", "nics", "vcpus"]:
4877 d61df03e Iustin Pop
        if not hasattr(self.op, attr):
4878 d61df03e Iustin Pop
          raise errors.OpPrereqError("Missing attribute '%s' on opcode input" %
4879 d61df03e Iustin Pop
                                     attr)
4880 d61df03e Iustin Pop
      iname = self.cfg.ExpandInstanceName(self.op.name)
4881 d61df03e Iustin Pop
      if iname is not None:
4882 d61df03e Iustin Pop
        raise errors.OpPrereqError("Instance '%s' already in the cluster" %
4883 d61df03e Iustin Pop
                                   iname)
4884 d61df03e Iustin Pop
      if not isinstance(self.op.nics, list):
4885 d61df03e Iustin Pop
        raise errors.OpPrereqError("Invalid parameter 'nics'")
4886 d61df03e Iustin Pop
      for row in self.op.nics:
4887 d61df03e Iustin Pop
        if (not isinstance(row, dict) or
4888 d61df03e Iustin Pop
            "mac" not in row or
4889 d61df03e Iustin Pop
            "ip" not in row or
4890 d61df03e Iustin Pop
            "bridge" not in row):
4891 d61df03e Iustin Pop
          raise errors.OpPrereqError("Invalid contents of the"
4892 d61df03e Iustin Pop
                                     " 'nics' parameter")
4893 d61df03e Iustin Pop
      if not isinstance(self.op.disks, list):
4894 d61df03e Iustin Pop
        raise errors.OpPrereqError("Invalid parameter 'disks'")
4895 298fe380 Iustin Pop
      if len(self.op.disks) != 2:
4896 298fe380 Iustin Pop
        raise errors.OpPrereqError("Only two-disk configurations supported")
4897 d61df03e Iustin Pop
      for row in self.op.disks:
4898 d61df03e Iustin Pop
        if (not isinstance(row, dict) or
4899 d61df03e Iustin Pop
            "size" not in row or
4900 d61df03e Iustin Pop
            not isinstance(row["size"], int) or
4901 d61df03e Iustin Pop
            "mode" not in row or
4902 d61df03e Iustin Pop
            row["mode"] not in ['r', 'w']):
4903 d61df03e Iustin Pop
          raise errors.OpPrereqError("Invalid contents of the"
4904 d61df03e Iustin Pop
                                     " 'disks' parameter")
4905 298fe380 Iustin Pop
    elif self.op.mode == constants.IALLOCATOR_MODE_RELOC:
4906 d61df03e Iustin Pop
      if not hasattr(self.op, "name"):
4907 d61df03e Iustin Pop
        raise errors.OpPrereqError("Missing attribute 'name' on opcode input")
4908 d61df03e Iustin Pop
      fname = self.cfg.ExpandInstanceName(self.op.name)
4909 d61df03e Iustin Pop
      if fname is None:
4910 d61df03e Iustin Pop
        raise errors.OpPrereqError("Instance '%s' not found for relocation" %
4911 d61df03e Iustin Pop
                                   self.op.name)
4912 d61df03e Iustin Pop
      self.op.name = fname
4913 29859cb7 Iustin Pop
      self.relocate_from = self.cfg.GetInstanceInfo(fname).secondary_nodes
4914 d61df03e Iustin Pop
    else:
4915 d61df03e Iustin Pop
      raise errors.OpPrereqError("Invalid test allocator mode '%s'" %
4916 d61df03e Iustin Pop
                                 self.op.mode)
4917 d61df03e Iustin Pop
4918 298fe380 Iustin Pop
    if self.op.direction == constants.IALLOCATOR_DIR_OUT:
4919 298fe380 Iustin Pop
      if not hasattr(self.op, "allocator") or self.op.allocator is None:
4920 d61df03e Iustin Pop
        raise errors.OpPrereqError("Missing allocator name")
4921 298fe380 Iustin Pop
    elif self.op.direction != constants.IALLOCATOR_DIR_IN:
4922 d61df03e Iustin Pop
      raise errors.OpPrereqError("Wrong allocator test '%s'" %
4923 d61df03e Iustin Pop
                                 self.op.direction)
4924 d61df03e Iustin Pop
4925 d61df03e Iustin Pop
  def Exec(self, feedback_fn):
4926 d61df03e Iustin Pop
    """Run the allocator test.
4927 d61df03e Iustin Pop

4928 d61df03e Iustin Pop
    """
4929 29859cb7 Iustin Pop
    if self.op.mode == constants.IALLOCATOR_MODE_ALLOC:
4930 29859cb7 Iustin Pop
      ial = IAllocator(self.cfg, self.sstore,
4931 29859cb7 Iustin Pop
                       mode=self.op.mode,
4932 29859cb7 Iustin Pop
                       name=self.op.name,
4933 29859cb7 Iustin Pop
                       mem_size=self.op.mem_size,
4934 29859cb7 Iustin Pop
                       disks=self.op.disks,
4935 29859cb7 Iustin Pop
                       disk_template=self.op.disk_template,
4936 29859cb7 Iustin Pop
                       os=self.op.os,
4937 29859cb7 Iustin Pop
                       tags=self.op.tags,
4938 29859cb7 Iustin Pop
                       nics=self.op.nics,
4939 29859cb7 Iustin Pop
                       vcpus=self.op.vcpus,
4940 29859cb7 Iustin Pop
                       )
4941 29859cb7 Iustin Pop
    else:
4942 29859cb7 Iustin Pop
      ial = IAllocator(self.cfg, self.sstore,
4943 29859cb7 Iustin Pop
                       mode=self.op.mode,
4944 29859cb7 Iustin Pop
                       name=self.op.name,
4945 29859cb7 Iustin Pop
                       relocate_from=list(self.relocate_from),
4946 29859cb7 Iustin Pop
                       )
4947 d61df03e Iustin Pop
4948 298fe380 Iustin Pop
    if self.op.direction == constants.IALLOCATOR_DIR_IN:
4949 d1c2dd75 Iustin Pop
      result = ial.in_text
4950 298fe380 Iustin Pop
    else:
4951 d1c2dd75 Iustin Pop
      ial.Run(self.op.allocator, validate=False)
4952 d1c2dd75 Iustin Pop
      result = ial.out_text
4953 298fe380 Iustin Pop
    return result