
root / lib / cmdlib.py @ 4e713df6


1 2f31098c Iustin Pop
#
2 a8083063 Iustin Pop
#
3 a8083063 Iustin Pop
4 e7c6e02b Michael Hanselmann
# Copyright (C) 2006, 2007, 2008 Google Inc.
5 a8083063 Iustin Pop
#
6 a8083063 Iustin Pop
# This program is free software; you can redistribute it and/or modify
7 a8083063 Iustin Pop
# it under the terms of the GNU General Public License as published by
8 a8083063 Iustin Pop
# the Free Software Foundation; either version 2 of the License, or
9 a8083063 Iustin Pop
# (at your option) any later version.
10 a8083063 Iustin Pop
#
11 a8083063 Iustin Pop
# This program is distributed in the hope that it will be useful, but
12 a8083063 Iustin Pop
# WITHOUT ANY WARRANTY; without even the implied warranty of
13 a8083063 Iustin Pop
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
14 a8083063 Iustin Pop
# General Public License for more details.
15 a8083063 Iustin Pop
#
16 a8083063 Iustin Pop
# You should have received a copy of the GNU General Public License
17 a8083063 Iustin Pop
# along with this program; if not, write to the Free Software
18 a8083063 Iustin Pop
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
19 a8083063 Iustin Pop
# 02110-1301, USA.
20 a8083063 Iustin Pop
21 a8083063 Iustin Pop
22 880478f8 Iustin Pop
"""Module implementing the master-side code."""
23 a8083063 Iustin Pop
24 a8083063 Iustin Pop
# pylint: disable-msg=W0613,W0201
25 a8083063 Iustin Pop
26 a8083063 Iustin Pop
import os
27 a8083063 Iustin Pop
import os.path
28 a8083063 Iustin Pop
import sha
29 a8083063 Iustin Pop
import time
30 a8083063 Iustin Pop
import tempfile
31 a8083063 Iustin Pop
import re
32 a8083063 Iustin Pop
import platform
33 a8083063 Iustin Pop
34 a8083063 Iustin Pop
from ganeti import rpc
35 a8083063 Iustin Pop
from ganeti import ssh
36 a8083063 Iustin Pop
from ganeti import logger
37 a8083063 Iustin Pop
from ganeti import utils
38 a8083063 Iustin Pop
from ganeti import errors
39 a8083063 Iustin Pop
from ganeti import hypervisor
40 a8083063 Iustin Pop
from ganeti import config
41 a8083063 Iustin Pop
from ganeti import constants
42 a8083063 Iustin Pop
from ganeti import objects
43 a8083063 Iustin Pop
from ganeti import opcodes
44 a8083063 Iustin Pop
from ganeti import ssconf
45 8d14b30d Iustin Pop
from ganeti import serializer
46 d61df03e Iustin Pop
47 d61df03e Iustin Pop
48 a8083063 Iustin Pop
class LogicalUnit(object):
49 396e1b78 Michael Hanselmann
  """Logical Unit base class.
50 a8083063 Iustin Pop

51 a8083063 Iustin Pop
  Subclasses must follow these rules:
52 a8083063 Iustin Pop
    - implement CheckPrereq which also fills in the opcode instance
53 a8083063 Iustin Pop
      with all the fields (even if as None)
54 a8083063 Iustin Pop
    - implement Exec
55 a8083063 Iustin Pop
    - implement BuildHooksEnv
56 a8083063 Iustin Pop
    - redefine HPATH and HTYPE
57 a8083063 Iustin Pop
    - optionally redefine their run requirements (REQ_CLUSTER,
58 a8083063 Iustin Pop
      REQ_MASTER); note that all commands require root permissions
59 a8083063 Iustin Pop

60 a8083063 Iustin Pop
  """
61 a8083063 Iustin Pop
  HPATH = None
62 a8083063 Iustin Pop
  HTYPE = None
63 a8083063 Iustin Pop
  _OP_REQP = []
64 a8083063 Iustin Pop
  REQ_CLUSTER = True
65 a8083063 Iustin Pop
  REQ_MASTER = True
66 a8083063 Iustin Pop
67 a8083063 Iustin Pop
  def __init__(self, processor, op, cfg, sstore):
68 a8083063 Iustin Pop
    """Constructor for LogicalUnit.
69 a8083063 Iustin Pop

70 a8083063 Iustin Pop
    This needs to be overridden in derived classes in order to check op
71 a8083063 Iustin Pop
    validity.
72 a8083063 Iustin Pop

73 a8083063 Iustin Pop
    """
74 5bfac263 Iustin Pop
    self.proc = processor
75 a8083063 Iustin Pop
    self.op = op
76 a8083063 Iustin Pop
    self.cfg = cfg
77 a8083063 Iustin Pop
    self.sstore = sstore
78 c92b310a Michael Hanselmann
    self.__ssh = None
79 c92b310a Michael Hanselmann
80 a8083063 Iustin Pop
    for attr_name in self._OP_REQP:
81 a8083063 Iustin Pop
      attr_val = getattr(op, attr_name, None)
82 a8083063 Iustin Pop
      if attr_val is None:
83 3ecf6786 Iustin Pop
        raise errors.OpPrereqError("Required parameter '%s' missing" %
84 3ecf6786 Iustin Pop
                                   attr_name)
85 a8083063 Iustin Pop
    if self.REQ_CLUSTER:
86 a8083063 Iustin Pop
      if not cfg.IsCluster():
87 3ecf6786 Iustin Pop
        raise errors.OpPrereqError("Cluster not initialized yet,"
88 3ecf6786 Iustin Pop
                                   " use 'gnt-cluster init' first.")
89 a8083063 Iustin Pop
      if self.REQ_MASTER:
90 880478f8 Iustin Pop
        master = sstore.GetMasterNode()
91 89e1fc26 Iustin Pop
        if master != utils.HostInfo().name:
92 3ecf6786 Iustin Pop
          raise errors.OpPrereqError("Commands must be run on the master"
93 3ecf6786 Iustin Pop
                                     " node %s" % master)
94 a8083063 Iustin Pop
95 c92b310a Michael Hanselmann
  def __GetSSH(self):
96 c92b310a Michael Hanselmann
    """Returns the SshRunner object
97 c92b310a Michael Hanselmann

98 c92b310a Michael Hanselmann
    """
99 c92b310a Michael Hanselmann
    if not self.__ssh:
100 1ff08570 Michael Hanselmann
      self.__ssh = ssh.SshRunner(self.sstore)
101 c92b310a Michael Hanselmann
    return self.__ssh
102 c92b310a Michael Hanselmann
103 c92b310a Michael Hanselmann
  ssh = property(fget=__GetSSH)
104 c92b310a Michael Hanselmann
105 a8083063 Iustin Pop
  def CheckPrereq(self):
106 a8083063 Iustin Pop
    """Check prerequisites for this LU.
107 a8083063 Iustin Pop

108 a8083063 Iustin Pop
    This method should check that the prerequisites for the execution
109 a8083063 Iustin Pop
    of this LU are fulfilled. It can do internode communication, but
110 a8083063 Iustin Pop
    it should be idempotent - no cluster or system changes are
111 a8083063 Iustin Pop
    allowed.
112 a8083063 Iustin Pop

113 a8083063 Iustin Pop
    The method should raise errors.OpPrereqError in case something is
114 a8083063 Iustin Pop
    not fulfilled. Its return value is ignored.
115 a8083063 Iustin Pop

116 a8083063 Iustin Pop
    This method should also update all the parameters of the opcode to
117 a8083063 Iustin Pop
    their canonical form; e.g. a short node name must be fully
118 a8083063 Iustin Pop
    expanded after this method has successfully completed (so that
119 a8083063 Iustin Pop
    hooks, logging, etc. work correctly).
120 a8083063 Iustin Pop

121 a8083063 Iustin Pop
    """
122 a8083063 Iustin Pop
    raise NotImplementedError
123 a8083063 Iustin Pop
124 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
125 a8083063 Iustin Pop
    """Execute the LU.
126 a8083063 Iustin Pop

127 a8083063 Iustin Pop
    This method should implement the actual work. It should raise
128 a8083063 Iustin Pop
    errors.OpExecError for failures that are somewhat dealt with in
129 a8083063 Iustin Pop
    code, or expected.
130 a8083063 Iustin Pop

131 a8083063 Iustin Pop
    """
132 a8083063 Iustin Pop
    raise NotImplementedError
133 a8083063 Iustin Pop
134 a8083063 Iustin Pop
  def BuildHooksEnv(self):
135 a8083063 Iustin Pop
    """Build hooks environment for this LU.
136 a8083063 Iustin Pop

137 a8083063 Iustin Pop
    This method should return a three-element tuple consisting of: a dict
138 a8083063 Iustin Pop
    containing the environment that will be used for running the
139 a8083063 Iustin Pop
    specific hook for this LU, a list of node names on which the hook
140 a8083063 Iustin Pop
    should run before the execution, and a list of node names on which
141 a8083063 Iustin Pop
    the hook should run after the execution.
142 a8083063 Iustin Pop

143 a8083063 Iustin Pop
    The keys of the dict must not be prefixed with 'GANETI_', as this will
144 a8083063 Iustin Pop
    be handled in the hooks runner. Also note additional keys will be
145 a8083063 Iustin Pop
    added by the hooks runner. If the LU doesn't define any
146 a8083063 Iustin Pop
    environment, an empty dict (and not None) should be returned.
147 a8083063 Iustin Pop

148 8a3fe350 Guido Trotter
    If there are no nodes, an empty list (and not None) should be returned.
149 a8083063 Iustin Pop

150 a8083063 Iustin Pop
    Note that if the HPATH for a LU class is None, this function will
151 a8083063 Iustin Pop
    not be called.
152 a8083063 Iustin Pop

153 a8083063 Iustin Pop
    """
154 a8083063 Iustin Pop
    raise NotImplementedError
155 a8083063 Iustin Pop
156 1fce5219 Guido Trotter
  def HooksCallBack(self, phase, hook_results, feedback_fn, lu_result):
157 1fce5219 Guido Trotter
    """Notify the LU about the results of its hooks.
158 1fce5219 Guido Trotter

159 1fce5219 Guido Trotter
    This method is called every time a hooks phase is executed, and notifies
160 1fce5219 Guido Trotter
    the Logical Unit about the hooks' result. The LU can then use it to alter
161 1fce5219 Guido Trotter
    its result based on the hooks.  By default the method does nothing and the
162 1fce5219 Guido Trotter
    previous result is passed back unchanged, but any LU can override it if it
163 1fce5219 Guido Trotter
    wants to use the local cluster hook-scripts somehow.
164 1fce5219 Guido Trotter

165 1fce5219 Guido Trotter
    Args:
166 1fce5219 Guido Trotter
      phase: the hooks phase that has just been run
167 1fce5219 Guido Trotter
      hook_results: the results of the multi-node hooks rpc call
168 1fce5219 Guido Trotter
      feedback_fn: function to send feedback back to the caller
169 1fce5219 Guido Trotter
      lu_result: the previous result this LU had, or None in the PRE phase.
170 1fce5219 Guido Trotter

171 1fce5219 Guido Trotter
    """
172 1fce5219 Guido Trotter
    return lu_result
173 1fce5219 Guido Trotter
174 a8083063 Iustin Pop
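# An illustrative sketch only (not a real Ganeti opcode): a minimal LogicalUnit
# subclass following the contract documented above. The class name, hooks path
# and the "node_name" opcode field are invented for the example; only
# attributes provided by the LogicalUnit base class are used.
class LUExampleEcho(LogicalUnit):
  """Example-only LU that canonicalises a node name and echoes it back.

  """
  HPATH = "example-echo"
  HTYPE = constants.HTYPE_CLUSTER
  _OP_REQP = ["node_name"]  # verified against self.op by LogicalUnit.__init__

  def BuildHooksEnv(self):
    # the three-element contract: env dict, pre-hook nodes, post-hook nodes
    env = {"OP_TARGET": self.op.node_name}
    return env, [], [self.op.node_name]

  def CheckPrereq(self):
    # canonicalise the (possibly short) node name or fail the prerequisites
    node = self.cfg.ExpandNodeName(self.op.node_name)
    if node is None:
      raise errors.OpPrereqError("No such node name '%s'" % self.op.node_name)
    self.op.node_name = node

  def Exec(self, feedback_fn):
    feedback_fn("Echo from node %s" % self.op.node_name)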
175 a8083063 Iustin Pop
class NoHooksLU(LogicalUnit):
176 a8083063 Iustin Pop
  """Simple LU which runs no hooks.
177 a8083063 Iustin Pop

178 a8083063 Iustin Pop
  This LU is intended as a parent for other LogicalUnits which will
179 a8083063 Iustin Pop
  run no hooks, in order to reduce duplicate code.
180 a8083063 Iustin Pop

181 a8083063 Iustin Pop
  """
182 a8083063 Iustin Pop
  HPATH = None
183 a8083063 Iustin Pop
  HTYPE = None
184 a8083063 Iustin Pop
185 a8083063 Iustin Pop
186 9440aeab Michael Hanselmann
def _AddHostToEtcHosts(hostname):
187 9440aeab Michael Hanselmann
  """Wrapper around utils.SetEtcHostsEntry.
188 9440aeab Michael Hanselmann

189 9440aeab Michael Hanselmann
  """
190 9440aeab Michael Hanselmann
  hi = utils.HostInfo(name=hostname)
191 9440aeab Michael Hanselmann
  utils.SetEtcHostsEntry(constants.ETC_HOSTS, hi.ip, hi.name, [hi.ShortName()])
192 9440aeab Michael Hanselmann
193 9440aeab Michael Hanselmann
194 c8a0948f Michael Hanselmann
def _RemoveHostFromEtcHosts(hostname):
195 9440aeab Michael Hanselmann
  """Wrapper around utils.RemoveEtcHostsEntry.
196 c8a0948f Michael Hanselmann

197 c8a0948f Michael Hanselmann
  """
198 c8a0948f Michael Hanselmann
  hi = utils.HostInfo(name=hostname)
199 c8a0948f Michael Hanselmann
  utils.RemoveEtcHostsEntry(constants.ETC_HOSTS, hi.name)
200 c8a0948f Michael Hanselmann
  utils.RemoveEtcHostsEntry(constants.ETC_HOSTS, hi.ShortName())
201 c8a0948f Michael Hanselmann
202 c8a0948f Michael Hanselmann
203 dcb93971 Michael Hanselmann
def _GetWantedNodes(lu, nodes):
204 a7ba5e53 Iustin Pop
  """Returns list of checked and expanded node names.
205 83120a01 Michael Hanselmann

206 83120a01 Michael Hanselmann
  Args:
207 83120a01 Michael Hanselmann
    nodes: List of nodes (strings) or None for all
208 83120a01 Michael Hanselmann

209 83120a01 Michael Hanselmann
  """
210 3312b702 Iustin Pop
  if not isinstance(nodes, list):
211 3ecf6786 Iustin Pop
    raise errors.OpPrereqError("Invalid argument type 'nodes'")
212 dcb93971 Michael Hanselmann
213 dcb93971 Michael Hanselmann
  if nodes:
214 3312b702 Iustin Pop
    wanted = []
215 dcb93971 Michael Hanselmann
216 dcb93971 Michael Hanselmann
    for name in nodes:
217 a7ba5e53 Iustin Pop
      node = lu.cfg.ExpandNodeName(name)
218 dcb93971 Michael Hanselmann
      if node is None:
219 3ecf6786 Iustin Pop
        raise errors.OpPrereqError("No such node name '%s'" % name)
220 3312b702 Iustin Pop
      wanted.append(node)
221 dcb93971 Michael Hanselmann
222 dcb93971 Michael Hanselmann
  else:
223 a7ba5e53 Iustin Pop
    wanted = lu.cfg.GetNodeList()
224 a7ba5e53 Iustin Pop
  return utils.NiceSort(wanted)
225 3312b702 Iustin Pop
226 3312b702 Iustin Pop
227 3312b702 Iustin Pop
def _GetWantedInstances(lu, instances):
228 a7ba5e53 Iustin Pop
  """Returns list of checked and expanded instance names.
229 3312b702 Iustin Pop

230 3312b702 Iustin Pop
  Args:
231 3312b702 Iustin Pop
    instances: List of instances (strings) or None for all
232 3312b702 Iustin Pop

233 3312b702 Iustin Pop
  """
234 3312b702 Iustin Pop
  if not isinstance(instances, list):
235 3312b702 Iustin Pop
    raise errors.OpPrereqError("Invalid argument type 'instances'")
236 3312b702 Iustin Pop
237 3312b702 Iustin Pop
  if instances:
238 3312b702 Iustin Pop
    wanted = []
239 3312b702 Iustin Pop
240 3312b702 Iustin Pop
    for name in instances:
241 a7ba5e53 Iustin Pop
      instance = lu.cfg.ExpandInstanceName(name)
242 3312b702 Iustin Pop
      if instance is None:
243 3312b702 Iustin Pop
        raise errors.OpPrereqError("No such instance name '%s'" % name)
244 3312b702 Iustin Pop
      wanted.append(instance)
245 3312b702 Iustin Pop
246 3312b702 Iustin Pop
  else:
247 a7ba5e53 Iustin Pop
    wanted = lu.cfg.GetInstanceList()
248 a7ba5e53 Iustin Pop
  return utils.NiceSort(wanted)
249 dcb93971 Michael Hanselmann
250 dcb93971 Michael Hanselmann
251 dcb93971 Michael Hanselmann
def _CheckOutputFields(static, dynamic, selected):
252 83120a01 Michael Hanselmann
  """Checks whether all selected fields are valid.
253 83120a01 Michael Hanselmann

254 83120a01 Michael Hanselmann
  Args:
255 83120a01 Michael Hanselmann
    static: Static fields
256 83120a01 Michael Hanselmann
    dynamic: Dynamic fields
257 83120a01 Michael Hanselmann

258 83120a01 Michael Hanselmann
  """
259 83120a01 Michael Hanselmann
  static_fields = frozenset(static)
260 83120a01 Michael Hanselmann
  dynamic_fields = frozenset(dynamic)
261 dcb93971 Michael Hanselmann
262 83120a01 Michael Hanselmann
  all_fields = static_fields | dynamic_fields
263 dcb93971 Michael Hanselmann
264 83120a01 Michael Hanselmann
  if not all_fields.issuperset(selected):
265 3ecf6786 Iustin Pop
    raise errors.OpPrereqError("Unknown output fields selected: %s"
266 3ecf6786 Iustin Pop
                               % ",".join(frozenset(selected).
267 3ecf6786 Iustin Pop
                                          difference(all_fields)))
268 dcb93971 Michael Hanselmann
269 dcb93971 Michael Hanselmann
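# Worked example for _CheckOutputFields (field names invented for
# illustration):
#
#   _CheckOutputFields(static=["name", "pinst_cnt"],
#                      dynamic=["dtotal", "dfree"],
#                      selected=["name", "dfree"])   # accepted, returns None
#
#   _CheckOutputFields(static=["name", "pinst_cnt"],
#                      dynamic=["dtotal", "dfree"],
#                      selected=["name", "bogus"])
#   # raises OpPrereqError("Unknown output fields selected: bogus")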
270 ecb215b5 Michael Hanselmann
def _BuildInstanceHookEnv(name, primary_node, secondary_nodes, os_type, status,
271 396e1b78 Michael Hanselmann
                          memory, vcpus, nics):
272 ecb215b5 Michael Hanselmann
  """Builds instance related env variables for hooks from single variables.
273 ecb215b5 Michael Hanselmann

274 ecb215b5 Michael Hanselmann
  Args:
275 ecb215b5 Michael Hanselmann
    secondary_nodes: List of secondary nodes as strings
276 396e1b78 Michael Hanselmann
  """
277 396e1b78 Michael Hanselmann
  env = {
278 0e137c28 Iustin Pop
    "OP_TARGET": name,
279 396e1b78 Michael Hanselmann
    "INSTANCE_NAME": name,
280 396e1b78 Michael Hanselmann
    "INSTANCE_PRIMARY": primary_node,
281 396e1b78 Michael Hanselmann
    "INSTANCE_SECONDARIES": " ".join(secondary_nodes),
282 ecb215b5 Michael Hanselmann
    "INSTANCE_OS_TYPE": os_type,
283 396e1b78 Michael Hanselmann
    "INSTANCE_STATUS": status,
284 396e1b78 Michael Hanselmann
    "INSTANCE_MEMORY": memory,
285 396e1b78 Michael Hanselmann
    "INSTANCE_VCPUS": vcpus,
286 396e1b78 Michael Hanselmann
  }
287 396e1b78 Michael Hanselmann
288 396e1b78 Michael Hanselmann
  if nics:
289 396e1b78 Michael Hanselmann
    nic_count = len(nics)
290 53e4e875 Guido Trotter
    for idx, (ip, bridge, mac) in enumerate(nics):
291 396e1b78 Michael Hanselmann
      if ip is None:
292 396e1b78 Michael Hanselmann
        ip = ""
293 396e1b78 Michael Hanselmann
      env["INSTANCE_NIC%d_IP" % idx] = ip
294 396e1b78 Michael Hanselmann
      env["INSTANCE_NIC%d_BRIDGE" % idx] = bridge
295 53e4e875 Guido Trotter
      env["INSTANCE_NIC%d_HWADDR" % idx] = mac
296 396e1b78 Michael Hanselmann
  else:
297 396e1b78 Michael Hanselmann
    nic_count = 0
298 396e1b78 Michael Hanselmann
299 396e1b78 Michael Hanselmann
  env["INSTANCE_NIC_COUNT"] = nic_count
300 396e1b78 Michael Hanselmann
301 396e1b78 Michael Hanselmann
  return env
302 396e1b78 Michael Hanselmann
303 396e1b78 Michael Hanselmann
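# Example (invented values) of the environment built above for an instance
# with a single NIC:
#
#   _BuildInstanceHookEnv("inst1.example.com", "node1.example.com",
#                         ["node2.example.com"], "debian-etch", "up", 512, 1,
#                         [("198.51.100.10", "xen-br0", "aa:00:00:12:34:56")])
#
# returns:
#
#   {"OP_TARGET": "inst1.example.com",
#    "INSTANCE_NAME": "inst1.example.com",
#    "INSTANCE_PRIMARY": "node1.example.com",
#    "INSTANCE_SECONDARIES": "node2.example.com",
#    "INSTANCE_OS_TYPE": "debian-etch",
#    "INSTANCE_STATUS": "up",
#    "INSTANCE_MEMORY": 512,
#    "INSTANCE_VCPUS": 1,
#    "INSTANCE_NIC0_IP": "198.51.100.10",
#    "INSTANCE_NIC0_BRIDGE": "xen-br0",
#    "INSTANCE_NIC0_HWADDR": "aa:00:00:12:34:56",
#    "INSTANCE_NIC_COUNT": 1}
#
# The hooks runner adds the GANETI_ prefix to each key before exporting it to
# the hook scripts.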
304 396e1b78 Michael Hanselmann
def _BuildInstanceHookEnvByObject(instance, override=None):
305 ecb215b5 Michael Hanselmann
  """Builds instance related env variables for hooks from an object.
306 ecb215b5 Michael Hanselmann

307 ecb215b5 Michael Hanselmann
  Args:
308 ecb215b5 Michael Hanselmann
    instance: objects.Instance object of instance
309 ecb215b5 Michael Hanselmann
    override: dict of values to override
310 ecb215b5 Michael Hanselmann
  """
311 396e1b78 Michael Hanselmann
  args = {
312 396e1b78 Michael Hanselmann
    'name': instance.name,
313 396e1b78 Michael Hanselmann
    'primary_node': instance.primary_node,
314 396e1b78 Michael Hanselmann
    'secondary_nodes': instance.secondary_nodes,
315 ecb215b5 Michael Hanselmann
    'os_type': instance.os,
316 396e1b78 Michael Hanselmann
    'status': instance.status,
317 396e1b78 Michael Hanselmann
    'memory': instance.memory,
318 396e1b78 Michael Hanselmann
    'vcpus': instance.vcpus,
319 53e4e875 Guido Trotter
    'nics': [(nic.ip, nic.bridge, nic.mac) for nic in instance.nics],
320 396e1b78 Michael Hanselmann
  }
321 396e1b78 Michael Hanselmann
  if override:
322 396e1b78 Michael Hanselmann
    args.update(override)
323 396e1b78 Michael Hanselmann
  return _BuildInstanceHookEnv(**args)
324 396e1b78 Michael Hanselmann
325 396e1b78 Michael Hanselmann
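# The override argument of _BuildInstanceHookEnvByObject replaces entries in
# the args dict above (keyed by the _BuildInstanceHookEnv parameter names).
# For example, a hypothetical caller that wants the hooks to see the instance
# as stopped could use:
#
#   _BuildInstanceHookEnvByObject(instance, override={"status": "down"})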
326 a8083063 Iustin Pop
def _HasValidVG(vglist, vgname):
327 a8083063 Iustin Pop
  """Checks if the volume group list is valid.
328 a8083063 Iustin Pop

329 a8083063 Iustin Pop
  A non-None return value means there's an error, and the return value
330 a8083063 Iustin Pop
  is the error message.
331 a8083063 Iustin Pop

332 a8083063 Iustin Pop
  """
333 a8083063 Iustin Pop
  vgsize = vglist.get(vgname, None)
334 a8083063 Iustin Pop
  if vgsize is None:
335 a8083063 Iustin Pop
    return "volume group '%s' missing" % vgname
336 a8083063 Iustin Pop
  elif vgsize < 20480:
337 191a8385 Guido Trotter
    return ("volume group '%s' too small (20480MiB required, %dMiB found)" %
338 191a8385 Guido Trotter
            (vgname, vgsize))
339 a8083063 Iustin Pop
  return None
340 a8083063 Iustin Pop
341 a8083063 Iustin Pop
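# Return-value convention of _HasValidVG, shown with an invented volume group
# listing (sizes in MiB):
#
#   _HasValidVG({"xenvg": 409600}, "xenvg")  # -> None, i.e. the VG is usable
#   _HasValidVG({"xenvg": 409600}, "other")  # -> "volume group 'other' missing"
#   _HasValidVG({"xenvg": 10240}, "xenvg")   # -> "volume group 'xenvg' too small ..."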
342 a8083063 Iustin Pop
def _InitSSHSetup(node):
343 a8083063 Iustin Pop
  """Set up the SSH configuration for the cluster.
344 a8083063 Iustin Pop

345 a8083063 Iustin Pop

346 a8083063 Iustin Pop
  This generates a dsa keypair for root, adds the pub key to the
347 a8083063 Iustin Pop
  permitted hosts and adds the hostkey to its own known hosts.
348 a8083063 Iustin Pop

349 a8083063 Iustin Pop
  Args:
350 a8083063 Iustin Pop
    node: the name of this host as a fqdn
351 a8083063 Iustin Pop

352 a8083063 Iustin Pop
  """
353 70d9e3d8 Iustin Pop
  priv_key, pub_key, auth_keys = ssh.GetUserFiles(constants.GANETI_RUNAS)
354 a8083063 Iustin Pop
355 70d9e3d8 Iustin Pop
  for name in priv_key, pub_key:
356 70d9e3d8 Iustin Pop
    if os.path.exists(name):
357 70d9e3d8 Iustin Pop
      utils.CreateBackup(name)
358 70d9e3d8 Iustin Pop
    utils.RemoveFile(name)
359 a8083063 Iustin Pop
360 a8083063 Iustin Pop
  result = utils.RunCmd(["ssh-keygen", "-t", "dsa",
361 70d9e3d8 Iustin Pop
                         "-f", priv_key,
362 a8083063 Iustin Pop
                         "-q", "-N", ""])
363 a8083063 Iustin Pop
  if result.failed:
364 3ecf6786 Iustin Pop
    raise errors.OpExecError("Could not generate ssh keypair, error %s" %
365 3ecf6786 Iustin Pop
                             result.output)
366 a8083063 Iustin Pop
367 70d9e3d8 Iustin Pop
  f = open(pub_key, 'r')
368 a8083063 Iustin Pop
  try:
369 70d9e3d8 Iustin Pop
    utils.AddAuthorizedKey(auth_keys, f.read(8192))
370 a8083063 Iustin Pop
  finally:
371 a8083063 Iustin Pop
    f.close()
372 a8083063 Iustin Pop
373 a8083063 Iustin Pop
374 a8083063 Iustin Pop
def _InitGanetiServerSetup(ss):
375 a8083063 Iustin Pop
  """Set up the necessary configuration for the initial node daemon.
376 a8083063 Iustin Pop

377 a8083063 Iustin Pop
  This creates the nodepass file containing the shared password for
378 a8083063 Iustin Pop
  the cluster and also generates the SSL certificate.
379 a8083063 Iustin Pop

380 a8083063 Iustin Pop
  """
381 a8083063 Iustin Pop
  # Create pseudo random password
382 a8083063 Iustin Pop
  randpass = sha.new(os.urandom(64)).hexdigest()
383 a8083063 Iustin Pop
  # and write it into sstore
384 a8083063 Iustin Pop
  ss.SetKey(ss.SS_NODED_PASS, randpass)
385 a8083063 Iustin Pop
386 a8083063 Iustin Pop
  result = utils.RunCmd(["openssl", "req", "-new", "-newkey", "rsa:1024",
387 a8083063 Iustin Pop
                         "-days", str(365*5), "-nodes", "-x509",
388 a8083063 Iustin Pop
                         "-keyout", constants.SSL_CERT_FILE,
389 a8083063 Iustin Pop
                         "-out", constants.SSL_CERT_FILE, "-batch"])
390 a8083063 Iustin Pop
  if result.failed:
391 3ecf6786 Iustin Pop
    raise errors.OpExecError("could not generate server ssl cert, command"
392 3ecf6786 Iustin Pop
                             " %s had exitcode %s and error message %s" %
393 3ecf6786 Iustin Pop
                             (result.cmd, result.exit_code, result.output))
394 a8083063 Iustin Pop
395 a8083063 Iustin Pop
  os.chmod(constants.SSL_CERT_FILE, 0400)
396 a8083063 Iustin Pop
397 a8083063 Iustin Pop
  result = utils.RunCmd([constants.NODE_INITD_SCRIPT, "restart"])
398 a8083063 Iustin Pop
399 a8083063 Iustin Pop
  if result.failed:
400 3ecf6786 Iustin Pop
    raise errors.OpExecError("Could not start the node daemon, command %s"
401 3ecf6786 Iustin Pop
                             " had exitcode %s and error %s" %
402 3ecf6786 Iustin Pop
                             (result.cmd, result.exit_code, result.output))
403 a8083063 Iustin Pop
404 a8083063 Iustin Pop
405 bf6929a2 Alexander Schreiber
def _CheckInstanceBridgesExist(instance):
406 bf6929a2 Alexander Schreiber
  """Check that the bridges needed by an instance exist.
407 bf6929a2 Alexander Schreiber

408 bf6929a2 Alexander Schreiber
  """
409 bf6929a2 Alexander Schreiber
  # check bridges existence
410 bf6929a2 Alexander Schreiber
  brlist = [nic.bridge for nic in instance.nics]
411 bf6929a2 Alexander Schreiber
  if not rpc.call_bridges_exist(instance.primary_node, brlist):
412 bf6929a2 Alexander Schreiber
    raise errors.OpPrereqError("one or more target bridges %s do not"
413 bf6929a2 Alexander Schreiber
                               " exist on destination node '%s'" %
414 bf6929a2 Alexander Schreiber
                               (brlist, instance.primary_node))
415 bf6929a2 Alexander Schreiber
416 bf6929a2 Alexander Schreiber
417 a8083063 Iustin Pop
class LUInitCluster(LogicalUnit):
418 a8083063 Iustin Pop
  """Initialise the cluster.
419 a8083063 Iustin Pop

420 a8083063 Iustin Pop
  """
421 a8083063 Iustin Pop
  HPATH = "cluster-init"
422 a8083063 Iustin Pop
  HTYPE = constants.HTYPE_CLUSTER
423 efa14262 Manuel Franceschini
  _OP_REQP = ["cluster_name", "hypervisor_type", "mac_prefix",
424 871705db Manuel Franceschini
              "def_bridge", "master_netdev", "file_storage_dir"]
425 a8083063 Iustin Pop
  REQ_CLUSTER = False
426 a8083063 Iustin Pop
427 a8083063 Iustin Pop
  def BuildHooksEnv(self):
428 a8083063 Iustin Pop
    """Build hooks env.
429 a8083063 Iustin Pop

430 a8083063 Iustin Pop
    Notes: Since we don't require a cluster, we must manually add
431 a8083063 Iustin Pop
    ourselves to the post-run node list.
432 a8083063 Iustin Pop

433 a8083063 Iustin Pop
    """
434 0e137c28 Iustin Pop
    env = {"OP_TARGET": self.op.cluster_name}
435 0e137c28 Iustin Pop
    return env, [], [self.hostname.name]
436 a8083063 Iustin Pop
437 a8083063 Iustin Pop
  def CheckPrereq(self):
438 a8083063 Iustin Pop
    """Verify that the passed name is a valid one.
439 a8083063 Iustin Pop

440 a8083063 Iustin Pop
    """
441 a8083063 Iustin Pop
    if config.ConfigWriter.IsCluster():
442 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("Cluster is already initialised")
443 a8083063 Iustin Pop
444 2a6469d5 Alexander Schreiber
    if self.op.hypervisor_type == constants.HT_XEN_HVM31:
445 2a6469d5 Alexander Schreiber
      if not os.path.exists(constants.VNC_PASSWORD_FILE):
446 2a6469d5 Alexander Schreiber
        raise errors.OpPrereqError("Please prepare the cluster VNC "
447 2a6469d5 Alexander Schreiber
                                   "password file %s" %
448 2a6469d5 Alexander Schreiber
                                   constants.VNC_PASSWORD_FILE)
449 2a6469d5 Alexander Schreiber
450 89e1fc26 Iustin Pop
    self.hostname = hostname = utils.HostInfo()
451 ff98055b Iustin Pop
452 bcf043c9 Iustin Pop
    if hostname.ip.startswith("127."):
453 130e907e Iustin Pop
      raise errors.OpPrereqError("This host's IP resolves to the private"
454 107711b0 Michael Hanselmann
                                 " range (%s). Please fix DNS or %s." %
455 107711b0 Michael Hanselmann
                                 (hostname.ip, constants.ETC_HOSTS))
456 130e907e Iustin Pop
457 b15d625f Iustin Pop
    if not utils.TcpPing(hostname.ip, constants.DEFAULT_NODED_PORT,
458 b15d625f Iustin Pop
                         source=constants.LOCALHOST_IP_ADDRESS):
459 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("Inconsistency: this host's name resolves"
460 3ecf6786 Iustin Pop
                                 " to %s,\nbut this ip address does not"
461 3ecf6786 Iustin Pop
                                 " belong to this host."
462 bcf043c9 Iustin Pop
                                 " Aborting." % hostname.ip)
463 a8083063 Iustin Pop
464 411f8ad0 Iustin Pop
    self.clustername = clustername = utils.HostInfo(self.op.cluster_name)
465 411f8ad0 Iustin Pop
466 411f8ad0 Iustin Pop
    if utils.TcpPing(clustername.ip, constants.DEFAULT_NODED_PORT,
467 411f8ad0 Iustin Pop
                     timeout=5):
468 411f8ad0 Iustin Pop
      raise errors.OpPrereqError("Cluster IP already active. Aborting.")
469 411f8ad0 Iustin Pop
470 a8083063 Iustin Pop
    secondary_ip = getattr(self.op, "secondary_ip", None)
471 a8083063 Iustin Pop
    if secondary_ip and not utils.IsValidIP(secondary_ip):
472 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("Invalid secondary ip given")
473 16abfbc2 Alexander Schreiber
    if (secondary_ip and
474 16abfbc2 Alexander Schreiber
        secondary_ip != hostname.ip and
475 b15d625f Iustin Pop
        (not utils.TcpPing(secondary_ip, constants.DEFAULT_NODED_PORT,
476 b15d625f Iustin Pop
                           source=constants.LOCALHOST_IP_ADDRESS))):
477 f4bc1f2c Michael Hanselmann
      raise errors.OpPrereqError("You gave %s as secondary IP,"
478 f4bc1f2c Michael Hanselmann
                                 " but it does not belong to this host." %
479 16abfbc2 Alexander Schreiber
                                 secondary_ip)
480 a8083063 Iustin Pop
    self.secondary_ip = secondary_ip
481 a8083063 Iustin Pop
482 efa14262 Manuel Franceschini
    if not hasattr(self.op, "vg_name"):
483 efa14262 Manuel Franceschini
      self.op.vg_name = None
484 efa14262 Manuel Franceschini
    # if vg_name is not None, check whether the volume group is valid
485 efa14262 Manuel Franceschini
    if self.op.vg_name:
486 efa14262 Manuel Franceschini
      vgstatus = _HasValidVG(utils.ListVolumeGroups(), self.op.vg_name)
487 efa14262 Manuel Franceschini
      if vgstatus:
488 efa14262 Manuel Franceschini
        raise errors.OpPrereqError("Error: %s\nspecify --no-lvm-storage if"
489 efa14262 Manuel Franceschini
                                   " you are not using lvm" % vgstatus)
490 a8083063 Iustin Pop
491 2872a949 Manuel Franceschini
    self.op.file_storage_dir = os.path.normpath(self.op.file_storage_dir)
492 2872a949 Manuel Franceschini
493 871705db Manuel Franceschini
    if not os.path.isabs(self.op.file_storage_dir):
494 871705db Manuel Franceschini
      raise errors.OpPrereqError("The given file storage directory is"
495 871705db Manuel Franceschini
                                 " not an absolute path.")
496 871705db Manuel Franceschini
497 871705db Manuel Franceschini
    if not os.path.exists(self.op.file_storage_dir):
498 2872a949 Manuel Franceschini
      try:
499 2872a949 Manuel Franceschini
        os.makedirs(self.op.file_storage_dir, 0750)
500 2872a949 Manuel Franceschini
      except OSError, err:
501 2872a949 Manuel Franceschini
        raise errors.OpPrereqError("Cannot create file storage directory"
502 2872a949 Manuel Franceschini
                                   " '%s': %s" %
503 2872a949 Manuel Franceschini
                                   (self.op.file_storage_dir, err))
504 2872a949 Manuel Franceschini
505 2872a949 Manuel Franceschini
    if not os.path.isdir(self.op.file_storage_dir):
506 2872a949 Manuel Franceschini
      raise errors.OpPrereqError("The file storage directory '%s' is not"
507 2872a949 Manuel Franceschini
                                 " a directory." % self.op.file_storage_dir)
508 871705db Manuel Franceschini
509 a8083063 Iustin Pop
    if not re.match("^[0-9a-z]{2}:[0-9a-z]{2}:[0-9a-z]{2}$",
510 a8083063 Iustin Pop
                    self.op.mac_prefix):
511 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("Invalid mac prefix given '%s'" %
512 3ecf6786 Iustin Pop
                                 self.op.mac_prefix)
513 a8083063 Iustin Pop
514 2584d4a4 Alexander Schreiber
    if self.op.hypervisor_type not in constants.HYPER_TYPES:
515 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("Invalid hypervisor type given '%s'" %
516 3ecf6786 Iustin Pop
                                 self.op.hypervisor_type)
517 a8083063 Iustin Pop
518 880478f8 Iustin Pop
    result = utils.RunCmd(["ip", "link", "show", "dev", self.op.master_netdev])
519 880478f8 Iustin Pop
    if result.failed:
520 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("Invalid master netdev given (%s): '%s'" %
521 8925faaa Iustin Pop
                                 (self.op.master_netdev,
522 8925faaa Iustin Pop
                                  result.output.strip()))
523 880478f8 Iustin Pop
524 7dd30006 Michael Hanselmann
    if not (os.path.isfile(constants.NODE_INITD_SCRIPT) and
525 7dd30006 Michael Hanselmann
            os.access(constants.NODE_INITD_SCRIPT, os.X_OK)):
526 f4bc1f2c Michael Hanselmann
      raise errors.OpPrereqError("Init.d script '%s' missing or not"
527 f4bc1f2c Michael Hanselmann
                                 " executable." % constants.NODE_INITD_SCRIPT)
528 c7b46d59 Iustin Pop
529 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
530 a8083063 Iustin Pop
    """Initialize the cluster.
531 a8083063 Iustin Pop

532 a8083063 Iustin Pop
    """
533 a8083063 Iustin Pop
    clustername = self.clustername
534 a8083063 Iustin Pop
    hostname = self.hostname
535 a8083063 Iustin Pop
536 a8083063 Iustin Pop
    # set up the simple store
537 4167825b Iustin Pop
    self.sstore = ss = ssconf.SimpleStore()
538 a8083063 Iustin Pop
    ss.SetKey(ss.SS_HYPERVISOR, self.op.hypervisor_type)
539 bcf043c9 Iustin Pop
    ss.SetKey(ss.SS_MASTER_NODE, hostname.name)
540 bcf043c9 Iustin Pop
    ss.SetKey(ss.SS_MASTER_IP, clustername.ip)
541 880478f8 Iustin Pop
    ss.SetKey(ss.SS_MASTER_NETDEV, self.op.master_netdev)
542 bcf043c9 Iustin Pop
    ss.SetKey(ss.SS_CLUSTER_NAME, clustername.name)
543 871705db Manuel Franceschini
    ss.SetKey(ss.SS_FILE_STORAGE_DIR, self.op.file_storage_dir)
544 243cdbcc Michael Hanselmann
    ss.SetKey(ss.SS_CONFIG_VERSION, constants.CONFIG_VERSION)
545 a8083063 Iustin Pop
546 a8083063 Iustin Pop
    # set up the inter-node password and certificate
547 a8083063 Iustin Pop
    _InitGanetiServerSetup(ss)
548 a8083063 Iustin Pop
549 a8083063 Iustin Pop
    # start the master ip
550 bcf043c9 Iustin Pop
    rpc.call_node_start_master(hostname.name)
551 a8083063 Iustin Pop
552 a8083063 Iustin Pop
    # set up ssh config and /etc/hosts
553 70d9e3d8 Iustin Pop
    f = open(constants.SSH_HOST_RSA_PUB, 'r')
554 a8083063 Iustin Pop
    try:
555 a8083063 Iustin Pop
      sshline = f.read()
556 a8083063 Iustin Pop
    finally:
557 a8083063 Iustin Pop
      f.close()
558 a8083063 Iustin Pop
    sshkey = sshline.split(" ")[1]
559 a8083063 Iustin Pop
560 9440aeab Michael Hanselmann
    _AddHostToEtcHosts(hostname.name)
561 bcf043c9 Iustin Pop
    _InitSSHSetup(hostname.name)
562 a8083063 Iustin Pop
563 a8083063 Iustin Pop
    # init of cluster config file
564 4167825b Iustin Pop
    self.cfg = cfgw = config.ConfigWriter()
565 bcf043c9 Iustin Pop
    cfgw.InitConfig(hostname.name, hostname.ip, self.secondary_ip,
566 5fcdc80d Iustin Pop
                    sshkey, self.op.mac_prefix,
567 a8083063 Iustin Pop
                    self.op.vg_name, self.op.def_bridge)
568 a8083063 Iustin Pop
569 f408b346 Michael Hanselmann
    ssh.WriteKnownHostsFile(cfgw, ss, constants.SSH_KNOWN_HOSTS_FILE)
570 f408b346 Michael Hanselmann
571 a8083063 Iustin Pop
572 a8083063 Iustin Pop
class LUDestroyCluster(NoHooksLU):
573 a8083063 Iustin Pop
  """Logical unit for destroying the cluster.
574 a8083063 Iustin Pop

575 a8083063 Iustin Pop
  """
576 a8083063 Iustin Pop
  _OP_REQP = []
577 a8083063 Iustin Pop
578 a8083063 Iustin Pop
  def CheckPrereq(self):
579 a8083063 Iustin Pop
    """Check prerequisites.
580 a8083063 Iustin Pop

581 a8083063 Iustin Pop
    This checks whether the cluster is empty.
582 a8083063 Iustin Pop

583 a8083063 Iustin Pop
    Any errors are signalled by raising errors.OpPrereqError.
584 a8083063 Iustin Pop

585 a8083063 Iustin Pop
    """
586 880478f8 Iustin Pop
    master = self.sstore.GetMasterNode()
587 a8083063 Iustin Pop
588 a8083063 Iustin Pop
    nodelist = self.cfg.GetNodeList()
589 db915bd1 Michael Hanselmann
    if len(nodelist) != 1 or nodelist[0] != master:
590 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("There are still %d node(s) in"
591 3ecf6786 Iustin Pop
                                 " this cluster." % (len(nodelist) - 1))
592 db915bd1 Michael Hanselmann
    instancelist = self.cfg.GetInstanceList()
593 db915bd1 Michael Hanselmann
    if instancelist:
594 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("There are still %d instance(s) in"
595 3ecf6786 Iustin Pop
                                 " this cluster." % len(instancelist))
596 a8083063 Iustin Pop
597 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
598 a8083063 Iustin Pop
    """Destroys the cluster.
599 a8083063 Iustin Pop

600 a8083063 Iustin Pop
    """
601 c8a0948f Michael Hanselmann
    master = self.sstore.GetMasterNode()
602 c9064964 Iustin Pop
    if not rpc.call_node_stop_master(master):
603 c9064964 Iustin Pop
      raise errors.OpExecError("Could not disable the master role")
604 70d9e3d8 Iustin Pop
    priv_key, pub_key, _ = ssh.GetUserFiles(constants.GANETI_RUNAS)
605 70d9e3d8 Iustin Pop
    utils.CreateBackup(priv_key)
606 70d9e3d8 Iustin Pop
    utils.CreateBackup(pub_key)
607 c8a0948f Michael Hanselmann
    rpc.call_node_leave_cluster(master)
608 a8083063 Iustin Pop
609 a8083063 Iustin Pop
610 d8fff41c Guido Trotter
class LUVerifyCluster(LogicalUnit):
611 a8083063 Iustin Pop
  """Verifies the cluster status.
612 a8083063 Iustin Pop

613 a8083063 Iustin Pop
  """
614 d8fff41c Guido Trotter
  HPATH = "cluster-verify"
615 d8fff41c Guido Trotter
  HTYPE = constants.HTYPE_CLUSTER
616 e54c4c5e Guido Trotter
  _OP_REQP = ["skip_checks"]
617 a8083063 Iustin Pop
618 a8083063 Iustin Pop
  def _VerifyNode(self, node, file_list, local_cksum, vglist, node_result,
619 a8083063 Iustin Pop
                  remote_version, feedback_fn):
620 a8083063 Iustin Pop
    """Run multiple tests against a node.
621 a8083063 Iustin Pop

622 a8083063 Iustin Pop
    Test list:
623 a8083063 Iustin Pop
      - compares ganeti version
624 a8083063 Iustin Pop
      - checks vg existence and size > 20G
625 a8083063 Iustin Pop
      - checks config file checksum
626 a8083063 Iustin Pop
      - checks ssh to other nodes
627 a8083063 Iustin Pop

628 a8083063 Iustin Pop
    Args:
629 a8083063 Iustin Pop
      node: name of the node to check
630 a8083063 Iustin Pop
      file_list: required list of files
631 a8083063 Iustin Pop
      local_cksum: dictionary of local files and their checksums
632 098c0958 Michael Hanselmann

633 a8083063 Iustin Pop
    """
634 a8083063 Iustin Pop
    # compares ganeti version
635 a8083063 Iustin Pop
    local_version = constants.PROTOCOL_VERSION
636 a8083063 Iustin Pop
    if not remote_version:
637 c840ae6f Guido Trotter
      feedback_fn("  - ERROR: connection to %s failed" % (node))
638 a8083063 Iustin Pop
      return True
639 a8083063 Iustin Pop
640 a8083063 Iustin Pop
    if local_version != remote_version:
641 a8083063 Iustin Pop
      feedback_fn("  - ERROR: sw version mismatch: master %s, node(%s) %s" %
642 a8083063 Iustin Pop
                      (local_version, node, remote_version))
643 a8083063 Iustin Pop
      return True
644 a8083063 Iustin Pop
645 a8083063 Iustin Pop
    # checks vg existance and size > 20G
646 a8083063 Iustin Pop
647 a8083063 Iustin Pop
    bad = False
648 a8083063 Iustin Pop
    if not vglist:
649 a8083063 Iustin Pop
      feedback_fn("  - ERROR: unable to check volume groups on node %s." %
650 a8083063 Iustin Pop
                      (node,))
651 a8083063 Iustin Pop
      bad = True
652 a8083063 Iustin Pop
    else:
653 a8083063 Iustin Pop
      vgstatus = _HasValidVG(vglist, self.cfg.GetVGName())
654 a8083063 Iustin Pop
      if vgstatus:
655 a8083063 Iustin Pop
        feedback_fn("  - ERROR: %s on node %s" % (vgstatus, node))
656 a8083063 Iustin Pop
        bad = True
657 a8083063 Iustin Pop
658 a8083063 Iustin Pop
    # checks config file checksum
659 a8083063 Iustin Pop
    # checks ssh to any
660 a8083063 Iustin Pop
661 a8083063 Iustin Pop
    if 'filelist' not in node_result:
662 a8083063 Iustin Pop
      bad = True
663 a8083063 Iustin Pop
      feedback_fn("  - ERROR: node hasn't returned file checksum data")
664 a8083063 Iustin Pop
    else:
665 a8083063 Iustin Pop
      remote_cksum = node_result['filelist']
666 a8083063 Iustin Pop
      for file_name in file_list:
667 a8083063 Iustin Pop
        if file_name not in remote_cksum:
668 a8083063 Iustin Pop
          bad = True
669 a8083063 Iustin Pop
          feedback_fn("  - ERROR: file '%s' missing" % file_name)
670 a8083063 Iustin Pop
        elif remote_cksum[file_name] != local_cksum[file_name]:
671 a8083063 Iustin Pop
          bad = True
672 a8083063 Iustin Pop
          feedback_fn("  - ERROR: file '%s' has wrong checksum" % file_name)
673 a8083063 Iustin Pop
674 a8083063 Iustin Pop
    if 'nodelist' not in node_result:
675 a8083063 Iustin Pop
      bad = True
676 a8083063 Iustin Pop
      feedback_fn("  - ERROR: node hasn't returned node connectivity data")
677 a8083063 Iustin Pop
    else:
678 a8083063 Iustin Pop
      if node_result['nodelist']:
679 a8083063 Iustin Pop
        bad = True
680 a8083063 Iustin Pop
        for node in node_result['nodelist']:
681 a8083063 Iustin Pop
          feedback_fn("  - ERROR: communication with node '%s': %s" %
682 a8083063 Iustin Pop
                          (node, node_result['nodelist'][node]))
683 a8083063 Iustin Pop
    hyp_result = node_result.get('hypervisor', None)
684 a8083063 Iustin Pop
    if hyp_result is not None:
685 a8083063 Iustin Pop
      feedback_fn("  - ERROR: hypervisor verify failure: '%s'" % hyp_result)
686 a8083063 Iustin Pop
    return bad
687 a8083063 Iustin Pop
688 c5705f58 Guido Trotter
  def _VerifyInstance(self, instance, instanceconfig, node_vol_is,
689 c5705f58 Guido Trotter
                      node_instance, feedback_fn):
690 a8083063 Iustin Pop
    """Verify an instance.
691 a8083063 Iustin Pop

692 a8083063 Iustin Pop
    This function checks to see if the required block devices are
693 a8083063 Iustin Pop
    available on the instance's node.
694 a8083063 Iustin Pop

695 a8083063 Iustin Pop
    """
696 a8083063 Iustin Pop
    bad = False
697 a8083063 Iustin Pop
698 a8083063 Iustin Pop
    node_current = instanceconfig.primary_node
699 a8083063 Iustin Pop
700 a8083063 Iustin Pop
    node_vol_should = {}
701 a8083063 Iustin Pop
    instanceconfig.MapLVsByNode(node_vol_should)
702 a8083063 Iustin Pop
703 a8083063 Iustin Pop
    for node in node_vol_should:
704 a8083063 Iustin Pop
      for volume in node_vol_should[node]:
705 a8083063 Iustin Pop
        if node not in node_vol_is or volume not in node_vol_is[node]:
706 a8083063 Iustin Pop
          feedback_fn("  - ERROR: volume %s missing on node %s" %
707 a8083063 Iustin Pop
                          (volume, node))
708 a8083063 Iustin Pop
          bad = True
709 a8083063 Iustin Pop
710 a8083063 Iustin Pop
    if instanceconfig.status != 'down':
711 a872dae6 Guido Trotter
      if (node_current not in node_instance or
712 a872dae6 Guido Trotter
          instance not in node_instance[node_current]):
713 a8083063 Iustin Pop
        feedback_fn("  - ERROR: instance %s not running on node %s" %
714 a8083063 Iustin Pop
                        (instance, node_current))
715 a8083063 Iustin Pop
        bad = True
716 a8083063 Iustin Pop
717 a8083063 Iustin Pop
    for node in node_instance:
718 a8083063 Iustin Pop
      if node != node_current:
719 a8083063 Iustin Pop
        if instance in node_instance[node]:
720 a8083063 Iustin Pop
          feedback_fn("  - ERROR: instance %s should not run on node %s" %
721 a8083063 Iustin Pop
                          (instance, node))
722 a8083063 Iustin Pop
          bad = True
723 a8083063 Iustin Pop
724 6a438c98 Michael Hanselmann
    return bad
725 a8083063 Iustin Pop
726 a8083063 Iustin Pop
  def _VerifyOrphanVolumes(self, node_vol_should, node_vol_is, feedback_fn):
727 a8083063 Iustin Pop
    """Verify if there are any unknown volumes in the cluster.
728 a8083063 Iustin Pop

729 a8083063 Iustin Pop
    The .os, .swap and backup volumes are ignored. All other volumes are
730 a8083063 Iustin Pop
    reported as unknown.
731 a8083063 Iustin Pop

732 a8083063 Iustin Pop
    """
733 a8083063 Iustin Pop
    bad = False
734 a8083063 Iustin Pop
735 a8083063 Iustin Pop
    for node in node_vol_is:
736 a8083063 Iustin Pop
      for volume in node_vol_is[node]:
737 a8083063 Iustin Pop
        if node not in node_vol_should or volume not in node_vol_should[node]:
738 a8083063 Iustin Pop
          feedback_fn("  - ERROR: volume %s on node %s should not exist" %
739 a8083063 Iustin Pop
                      (volume, node))
740 a8083063 Iustin Pop
          bad = True
741 a8083063 Iustin Pop
    return bad
742 a8083063 Iustin Pop
743 a8083063 Iustin Pop
  def _VerifyOrphanInstances(self, instancelist, node_instance, feedback_fn):
744 a8083063 Iustin Pop
    """Verify the list of running instances.
745 a8083063 Iustin Pop

746 a8083063 Iustin Pop
    This checks what instances are running but unknown to the cluster.
747 a8083063 Iustin Pop

748 a8083063 Iustin Pop
    """
749 a8083063 Iustin Pop
    bad = False
750 a8083063 Iustin Pop
    for node in node_instance:
751 a8083063 Iustin Pop
      for runninginstance in node_instance[node]:
752 a8083063 Iustin Pop
        if runninginstance not in instancelist:
753 a8083063 Iustin Pop
          feedback_fn("  - ERROR: instance %s on node %s should not exist" %
754 a8083063 Iustin Pop
                          (runninginstance, node))
755 a8083063 Iustin Pop
          bad = True
756 a8083063 Iustin Pop
    return bad
757 a8083063 Iustin Pop
758 2b3b6ddd Guido Trotter
  def _VerifyNPlusOneMemory(self, node_info, instance_cfg, feedback_fn):
759 2b3b6ddd Guido Trotter
    """Verify N+1 Memory Resilience.
760 2b3b6ddd Guido Trotter

761 2b3b6ddd Guido Trotter
    Check that if one single node dies we can still start all the instances it
762 2b3b6ddd Guido Trotter
    was primary for.
763 2b3b6ddd Guido Trotter

764 2b3b6ddd Guido Trotter
    """
765 2b3b6ddd Guido Trotter
    bad = False
766 2b3b6ddd Guido Trotter
767 2b3b6ddd Guido Trotter
    for node, nodeinfo in node_info.iteritems():
768 2b3b6ddd Guido Trotter
      # This code checks that every node which is now listed as secondary has
769 2b3b6ddd Guido Trotter
      # enough memory to host all instances it is supposed to, should a single
770 2b3b6ddd Guido Trotter
      # other node in the cluster fail.
771 2b3b6ddd Guido Trotter
      # FIXME: not ready for failover to an arbitrary node
772 2b3b6ddd Guido Trotter
      # FIXME: does not support file-backed instances
773 2b3b6ddd Guido Trotter
      # WARNING: we currently take into account down instances as well as up
774 2b3b6ddd Guido Trotter
      # ones, considering that even if they're down someone might want to start
775 2b3b6ddd Guido Trotter
      # them even in the event of a node failure.
776 2b3b6ddd Guido Trotter
      for prinode, instances in nodeinfo['sinst-by-pnode'].iteritems():
777 2b3b6ddd Guido Trotter
        needed_mem = 0
778 2b3b6ddd Guido Trotter
        for instance in instances:
779 2b3b6ddd Guido Trotter
          needed_mem += instance_cfg[instance].memory
780 2b3b6ddd Guido Trotter
        if nodeinfo['mfree'] < needed_mem:
781 2b3b6ddd Guido Trotter
          feedback_fn("  - ERROR: not enough memory on node %s to accommodate"
782 2b3b6ddd Guido Trotter
                      " failovers should node %s fail" % (node, prinode))
783 2b3b6ddd Guido Trotter
          bad = True
784 2b3b6ddd Guido Trotter
    return bad
785 2b3b6ddd Guido Trotter
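  # Worked example for the check above (numbers invented): suppose node2
  # reports mfree=1024 and its 'sinst-by-pnode' map is
  # {"node1": ["inst1", "inst2"]}, with inst1 needing 512 MiB and inst2
  # needing 768 MiB. Should node1 fail, node2 would have to start both
  # instances, needing 512 + 768 = 1280 MiB, which exceeds its 1024 MiB of
  # free memory, so the loop above reports the cluster as not N+1 compliant
  # for that node pair.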
786 a8083063 Iustin Pop
  def CheckPrereq(self):
787 a8083063 Iustin Pop
    """Check prerequisites.
788 a8083063 Iustin Pop

789 e54c4c5e Guido Trotter
    Transform the list of checks we're going to skip into a set and check that
790 e54c4c5e Guido Trotter
    all its members are valid.
791 a8083063 Iustin Pop

792 a8083063 Iustin Pop
    """
793 e54c4c5e Guido Trotter
    self.skip_set = frozenset(self.op.skip_checks)
794 e54c4c5e Guido Trotter
    if not constants.VERIFY_OPTIONAL_CHECKS.issuperset(self.skip_set):
795 e54c4c5e Guido Trotter
      raise errors.OpPrereqError("Invalid checks to be skipped specified")
796 a8083063 Iustin Pop
797 d8fff41c Guido Trotter
  def BuildHooksEnv(self):
798 d8fff41c Guido Trotter
    """Build hooks env.
799 d8fff41c Guido Trotter

800 d8fff41c Guido Trotter
    Cluster-Verify hooks are run only in the post phase; their failure causes
801 d8fff41c Guido Trotter
    the output to be logged in the verify output and the verification to fail.
802 d8fff41c Guido Trotter

803 d8fff41c Guido Trotter
    """
804 d8fff41c Guido Trotter
    all_nodes = self.cfg.GetNodeList()
805 d8fff41c Guido Trotter
    # TODO: populate the environment with useful information for verify hooks
806 d8fff41c Guido Trotter
    env = {}
807 d8fff41c Guido Trotter
    return env, [], all_nodes
808 d8fff41c Guido Trotter
809 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
810 a8083063 Iustin Pop
    """Verify integrity of cluster, performing various test on nodes.
811 a8083063 Iustin Pop

812 a8083063 Iustin Pop
    """
813 a8083063 Iustin Pop
    bad = False
814 a8083063 Iustin Pop
    feedback_fn("* Verifying global settings")
815 8522ceeb Iustin Pop
    for msg in self.cfg.VerifyConfig():
816 8522ceeb Iustin Pop
      feedback_fn("  - ERROR: %s" % msg)
817 a8083063 Iustin Pop
818 a8083063 Iustin Pop
    vg_name = self.cfg.GetVGName()
819 a8083063 Iustin Pop
    nodelist = utils.NiceSort(self.cfg.GetNodeList())
820 a8083063 Iustin Pop
    instancelist = utils.NiceSort(self.cfg.GetInstanceList())
821 93e4c50b Guido Trotter
    i_non_redundant = [] # Non redundant instances
822 a8083063 Iustin Pop
    node_volume = {}
823 a8083063 Iustin Pop
    node_instance = {}
824 9c9c7d30 Guido Trotter
    node_info = {}
825 26b6af5e Guido Trotter
    instance_cfg = {}
826 a8083063 Iustin Pop
827 a8083063 Iustin Pop
    # FIXME: verify OS list
828 a8083063 Iustin Pop
    # do local checksums
829 cb91d46e Iustin Pop
    file_names = list(self.sstore.GetFileList())
830 cb91d46e Iustin Pop
    file_names.append(constants.SSL_CERT_FILE)
831 cb91d46e Iustin Pop
    file_names.append(constants.CLUSTER_CONF_FILE)
832 a8083063 Iustin Pop
    local_checksums = utils.FingerprintFiles(file_names)
833 a8083063 Iustin Pop
834 a8083063 Iustin Pop
    feedback_fn("* Gathering data (%d nodes)" % len(nodelist))
835 a8083063 Iustin Pop
    all_volumeinfo = rpc.call_volume_list(nodelist, vg_name)
836 a8083063 Iustin Pop
    all_instanceinfo = rpc.call_instance_list(nodelist)
837 a8083063 Iustin Pop
    all_vglist = rpc.call_vg_list(nodelist)
838 a8083063 Iustin Pop
    node_verify_param = {
839 a8083063 Iustin Pop
      'filelist': file_names,
840 a8083063 Iustin Pop
      'nodelist': nodelist,
841 a8083063 Iustin Pop
      'hypervisor': None,
842 a8083063 Iustin Pop
      }
843 a8083063 Iustin Pop
    all_nvinfo = rpc.call_node_verify(nodelist, node_verify_param)
844 a8083063 Iustin Pop
    all_rversion = rpc.call_version(nodelist)
845 9c9c7d30 Guido Trotter
    all_ninfo = rpc.call_node_info(nodelist, self.cfg.GetVGName())
846 a8083063 Iustin Pop
847 a8083063 Iustin Pop
    for node in nodelist:
848 a8083063 Iustin Pop
      feedback_fn("* Verifying node %s" % node)
849 a8083063 Iustin Pop
      result = self._VerifyNode(node, file_names, local_checksums,
850 a8083063 Iustin Pop
                                all_vglist[node], all_nvinfo[node],
851 a8083063 Iustin Pop
                                all_rversion[node], feedback_fn)
852 a8083063 Iustin Pop
      bad = bad or result
853 a8083063 Iustin Pop
854 a8083063 Iustin Pop
      # node_volume
855 a8083063 Iustin Pop
      volumeinfo = all_volumeinfo[node]
856 a8083063 Iustin Pop
857 b63ed789 Iustin Pop
      if isinstance(volumeinfo, basestring):
858 b63ed789 Iustin Pop
        feedback_fn("  - ERROR: LVM problem on node %s: %s" %
859 b63ed789 Iustin Pop
                    (node, volumeinfo[-400:].encode('string_escape')))
860 b63ed789 Iustin Pop
        bad = True
861 b63ed789 Iustin Pop
        node_volume[node] = {}
862 b63ed789 Iustin Pop
      elif not isinstance(volumeinfo, dict):
863 a8083063 Iustin Pop
        feedback_fn("  - ERROR: connection to %s failed" % (node,))
864 a8083063 Iustin Pop
        bad = True
865 a8083063 Iustin Pop
        continue
866 b63ed789 Iustin Pop
      else:
867 b63ed789 Iustin Pop
        node_volume[node] = volumeinfo
868 a8083063 Iustin Pop
869 a8083063 Iustin Pop
      # node_instance
870 a8083063 Iustin Pop
      nodeinstance = all_instanceinfo[node]
871 a8083063 Iustin Pop
      if type(nodeinstance) != list:
872 a8083063 Iustin Pop
        feedback_fn("  - ERROR: connection to %s failed" % (node,))
873 a8083063 Iustin Pop
        bad = True
874 a8083063 Iustin Pop
        continue
875 a8083063 Iustin Pop
876 a8083063 Iustin Pop
      node_instance[node] = nodeinstance
877 a8083063 Iustin Pop
878 9c9c7d30 Guido Trotter
      # node_info
879 9c9c7d30 Guido Trotter
      nodeinfo = all_ninfo[node]
880 9c9c7d30 Guido Trotter
      if not isinstance(nodeinfo, dict):
881 9c9c7d30 Guido Trotter
        feedback_fn("  - ERROR: connection to %s failed" % (node,))
882 9c9c7d30 Guido Trotter
        bad = True
883 9c9c7d30 Guido Trotter
        continue
884 9c9c7d30 Guido Trotter
885 9c9c7d30 Guido Trotter
      try:
886 9c9c7d30 Guido Trotter
        node_info[node] = {
887 9c9c7d30 Guido Trotter
          "mfree": int(nodeinfo['memory_free']),
888 9c9c7d30 Guido Trotter
          "dfree": int(nodeinfo['vg_free']),
889 93e4c50b Guido Trotter
          "pinst": [],
890 93e4c50b Guido Trotter
          "sinst": [],
891 36e7da50 Guido Trotter
          # dictionary holding all instances this node is secondary for,
892 36e7da50 Guido Trotter
          # grouped by their primary node. Each key is a cluster node, and each
893 36e7da50 Guido Trotter
          # value is a list of instances which have the key as primary and the
894 36e7da50 Guido Trotter
          # current node as secondary.  this is handy to calculate N+1 memory
895 36e7da50 Guido Trotter
          # availability if you can only failover from a primary to its
896 36e7da50 Guido Trotter
          # secondary.
897 36e7da50 Guido Trotter
          "sinst-by-pnode": {},
898 9c9c7d30 Guido Trotter
        }
899 9c9c7d30 Guido Trotter
      except ValueError:
900 9c9c7d30 Guido Trotter
        feedback_fn("  - ERROR: invalid value returned from node %s" % (node,))
901 9c9c7d30 Guido Trotter
        bad = True
902 9c9c7d30 Guido Trotter
        continue
903 9c9c7d30 Guido Trotter
904 a8083063 Iustin Pop
    node_vol_should = {}
905 a8083063 Iustin Pop
906 a8083063 Iustin Pop
    for instance in instancelist:
907 a8083063 Iustin Pop
      feedback_fn("* Verifying instance %s" % instance)
908 a8083063 Iustin Pop
      inst_config = self.cfg.GetInstanceInfo(instance)
909 c5705f58 Guido Trotter
      result =  self._VerifyInstance(instance, inst_config, node_volume,
910 c5705f58 Guido Trotter
                                     node_instance, feedback_fn)
911 c5705f58 Guido Trotter
      bad = bad or result
912 a8083063 Iustin Pop
913 a8083063 Iustin Pop
      inst_config.MapLVsByNode(node_vol_should)
914 a8083063 Iustin Pop
915 26b6af5e Guido Trotter
      instance_cfg[instance] = inst_config
916 26b6af5e Guido Trotter
917 93e4c50b Guido Trotter
      pnode = inst_config.primary_node
918 93e4c50b Guido Trotter
      if pnode in node_info:
919 93e4c50b Guido Trotter
        node_info[pnode]['pinst'].append(instance)
920 93e4c50b Guido Trotter
      else:
921 93e4c50b Guido Trotter
        feedback_fn("  - ERROR: instance %s, connection to primary node"
922 93e4c50b Guido Trotter
                    " %s failed" % (instance, pnode))
923 93e4c50b Guido Trotter
        bad = True
924 93e4c50b Guido Trotter
925 93e4c50b Guido Trotter
      # If the instance is non-redundant we cannot survive losing its primary
926 93e4c50b Guido Trotter
      # node, so we are not N+1 compliant. On the other hand we have no disk
927 93e4c50b Guido Trotter
      # templates with more than one secondary so that situation is not well
928 93e4c50b Guido Trotter
      # supported either.
929 93e4c50b Guido Trotter
      # FIXME: does not support file-backed instances
930 93e4c50b Guido Trotter
      if len(inst_config.secondary_nodes) == 0:
931 93e4c50b Guido Trotter
        i_non_redundant.append(instance)
932 93e4c50b Guido Trotter
      elif len(inst_config.secondary_nodes) > 1:
933 93e4c50b Guido Trotter
        feedback_fn("  - WARNING: multiple secondaries for instance %s"
934 93e4c50b Guido Trotter
                    % instance)
935 93e4c50b Guido Trotter
936 93e4c50b Guido Trotter
      for snode in inst_config.secondary_nodes:
937 93e4c50b Guido Trotter
        if snode in node_info:
938 93e4c50b Guido Trotter
          node_info[snode]['sinst'].append(instance)
939 36e7da50 Guido Trotter
          if pnode not in node_info[snode]['sinst-by-pnode']:
940 36e7da50 Guido Trotter
            node_info[snode]['sinst-by-pnode'][pnode] = []
941 36e7da50 Guido Trotter
          node_info[snode]['sinst-by-pnode'][pnode].append(instance)
942 93e4c50b Guido Trotter
        else:
943 93e4c50b Guido Trotter
          feedback_fn("  - ERROR: instance %s, connection to secondary node"
944 93e4c50b Guido Trotter
                      " %s failed" % (instance, snode))
945 93e4c50b Guido Trotter
946 a8083063 Iustin Pop
    feedback_fn("* Verifying orphan volumes")
947 a8083063 Iustin Pop
    result = self._VerifyOrphanVolumes(node_vol_should, node_volume,
948 a8083063 Iustin Pop
                                       feedback_fn)
949 a8083063 Iustin Pop
    bad = bad or result
950 a8083063 Iustin Pop
951 a8083063 Iustin Pop
    feedback_fn("* Verifying remaining instances")
952 a8083063 Iustin Pop
    result = self._VerifyOrphanInstances(instancelist, node_instance,
953 a8083063 Iustin Pop
                                         feedback_fn)
954 a8083063 Iustin Pop
    bad = bad or result
955 a8083063 Iustin Pop
956 e54c4c5e Guido Trotter
    if constants.VERIFY_NPLUSONE_MEM not in self.skip_set:
957 e54c4c5e Guido Trotter
      feedback_fn("* Verifying N+1 Memory redundancy")
958 e54c4c5e Guido Trotter
      result = self._VerifyNPlusOneMemory(node_info, instance_cfg, feedback_fn)
959 e54c4c5e Guido Trotter
      bad = bad or result
960 2b3b6ddd Guido Trotter
961 2b3b6ddd Guido Trotter
    feedback_fn("* Other Notes")
962 2b3b6ddd Guido Trotter
    if i_non_redundant:
963 2b3b6ddd Guido Trotter
      feedback_fn("  - NOTICE: %d non-redundant instance(s) found."
964 2b3b6ddd Guido Trotter
                  % len(i_non_redundant))
965 2b3b6ddd Guido Trotter
966 a8083063 Iustin Pop
    return int(bad)
967 a8083063 Iustin Pop
968 d8fff41c Guido Trotter
  def HooksCallBack(self, phase, hooks_results, feedback_fn, lu_result):
969 d8fff41c Guido Trotter
    """Analize the post-hooks' result, handle it, and send some
970 d8fff41c Guido Trotter
    nicely-formatted feedback back to the user.
971 d8fff41c Guido Trotter

972 d8fff41c Guido Trotter
    Args:
973 d8fff41c Guido Trotter
      phase: the hooks phase that has just been run
974 d8fff41c Guido Trotter
      hooks_results: the results of the multi-node hooks rpc call
975 d8fff41c Guido Trotter
      feedback_fn: function to send feedback back to the caller
976 d8fff41c Guido Trotter
      lu_result: previous Exec result
977 d8fff41c Guido Trotter

978 d8fff41c Guido Trotter
    """
979 d8fff41c Guido Trotter
    # We only really run POST phase hooks, and are only interested in
    # their results
980 d8fff41c Guido Trotter
    if phase == constants.HOOKS_PHASE_POST:
981 d8fff41c Guido Trotter
      # Used to change hooks' output to proper indentation
982 d8fff41c Guido Trotter
      indent_re = re.compile('^', re.M)
983 d8fff41c Guido Trotter
      feedback_fn("* Hooks Results")
984 d8fff41c Guido Trotter
      if not hooks_results:
985 d8fff41c Guido Trotter
        feedback_fn("  - ERROR: general communication failure")
986 d8fff41c Guido Trotter
        lu_result = 1
987 d8fff41c Guido Trotter
      else:
988 d8fff41c Guido Trotter
        for node_name in hooks_results:
989 d8fff41c Guido Trotter
          show_node_header = True
990 d8fff41c Guido Trotter
          res = hooks_results[node_name]
991 d8fff41c Guido Trotter
          if res is False or not isinstance(res, list):
992 d8fff41c Guido Trotter
            feedback_fn("    Communication failure")
993 d8fff41c Guido Trotter
            lu_result = 1
994 d8fff41c Guido Trotter
            continue
995 d8fff41c Guido Trotter
          for script, hkr, output in res:
996 d8fff41c Guido Trotter
            if hkr == constants.HKR_FAIL:
997 d8fff41c Guido Trotter
              # The node header is only shown once, if there are
998 d8fff41c Guido Trotter
              # failing hooks on that node
999 d8fff41c Guido Trotter
              if show_node_header:
1000 d8fff41c Guido Trotter
                feedback_fn("  Node %s:" % node_name)
1001 d8fff41c Guido Trotter
                show_node_header = False
1002 d8fff41c Guido Trotter
              feedback_fn("    ERROR: Script %s failed, output:" % script)
1003 d8fff41c Guido Trotter
              output = indent_re.sub('      ', output)
1004 d8fff41c Guido Trotter
              feedback_fn("%s" % output)
1005 d8fff41c Guido Trotter
              lu_result = 1
1006 d8fff41c Guido Trotter
1007 d8fff41c Guido Trotter
      return lu_result
1008 d8fff41c Guido Trotter
1009 a8083063 Iustin Pop
1010 2c95a8d4 Iustin Pop
class LUVerifyDisks(NoHooksLU):
1011 2c95a8d4 Iustin Pop
  """Verifies the cluster disks status.
1012 2c95a8d4 Iustin Pop

1013 2c95a8d4 Iustin Pop
  """
1014 2c95a8d4 Iustin Pop
  _OP_REQP = []
1015 2c95a8d4 Iustin Pop
1016 2c95a8d4 Iustin Pop
  def CheckPrereq(self):
1017 2c95a8d4 Iustin Pop
    """Check prerequisites.
1018 2c95a8d4 Iustin Pop

1019 2c95a8d4 Iustin Pop
    This has no prerequisites.
1020 2c95a8d4 Iustin Pop

1021 2c95a8d4 Iustin Pop
    """
1022 2c95a8d4 Iustin Pop
    pass
1023 2c95a8d4 Iustin Pop
1024 2c95a8d4 Iustin Pop
  def Exec(self, feedback_fn):
1025 2c95a8d4 Iustin Pop
    """Verify integrity of cluster disks.
1026 2c95a8d4 Iustin Pop

1027 2c95a8d4 Iustin Pop
    """
1028 b63ed789 Iustin Pop
    result = res_nodes, res_nlvm, res_instances, res_missing = [], {}, [], {}
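    # The four members of the result tuple, as filled in below:
    #   res_nodes     - nodes that could not be contacted or returned bad data
    #   res_nlvm      - per-node LVM enumeration errors (node -> error string)
    #   res_instances - instances with at least one offline logical volume
    #   res_missing   - instance name -> list of missing (node, lv_name) pairs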
1029 2c95a8d4 Iustin Pop
1030 2c95a8d4 Iustin Pop
    vg_name = self.cfg.GetVGName()
1031 2c95a8d4 Iustin Pop
    nodes = utils.NiceSort(self.cfg.GetNodeList())
1032 2c95a8d4 Iustin Pop
    instances = [self.cfg.GetInstanceInfo(name)
1033 2c95a8d4 Iustin Pop
                 for name in self.cfg.GetInstanceList()]
1034 2c95a8d4 Iustin Pop
1035 2c95a8d4 Iustin Pop
    nv_dict = {}
1036 2c95a8d4 Iustin Pop
    for inst in instances:
1037 2c95a8d4 Iustin Pop
      inst_lvs = {}
1038 2c95a8d4 Iustin Pop
      if (inst.status != "up" or
1039 2c95a8d4 Iustin Pop
          inst.disk_template not in constants.DTS_NET_MIRROR):
1040 2c95a8d4 Iustin Pop
        continue
1041 2c95a8d4 Iustin Pop
      inst.MapLVsByNode(inst_lvs)
1042 2c95a8d4 Iustin Pop
      # transform { iname: {node: [vol,],},} to {(node, vol): iname}
1043 2c95a8d4 Iustin Pop
      for node, vol_list in inst_lvs.iteritems():
1044 2c95a8d4 Iustin Pop
        for vol in vol_list:
1045 2c95a8d4 Iustin Pop
          nv_dict[(node, vol)] = inst
1046 2c95a8d4 Iustin Pop
1047 2c95a8d4 Iustin Pop
    if not nv_dict:
1048 2c95a8d4 Iustin Pop
      return result
1049 2c95a8d4 Iustin Pop
1050 2c95a8d4 Iustin Pop
    node_lvs = rpc.call_volume_list(nodes, vg_name)
1051 2c95a8d4 Iustin Pop
1052 2c95a8d4 Iustin Pop
    to_act = set()
1053 2c95a8d4 Iustin Pop
    for node in nodes:
1054 2c95a8d4 Iustin Pop
      # node_volume
1055 2c95a8d4 Iustin Pop
      lvs = node_lvs[node]
1056 2c95a8d4 Iustin Pop
1057 b63ed789 Iustin Pop
      if isinstance(lvs, basestring):
1058 b63ed789 Iustin Pop
        logger.Info("error enumerating LVs on node %s: %s" % (node, lvs))
1059 b63ed789 Iustin Pop
        res_nlvm[node] = lvs
1060 b63ed789 Iustin Pop
      elif not isinstance(lvs, dict):
1061 2c95a8d4 Iustin Pop
        logger.Info("connection to node %s failed or invalid data returned" %
1062 2c95a8d4 Iustin Pop
                    (node,))
1063 2c95a8d4 Iustin Pop
        res_nodes.append(node)
1064 2c95a8d4 Iustin Pop
        continue
1065 2c95a8d4 Iustin Pop
1066 2c95a8d4 Iustin Pop
      for lv_name, (_, lv_inactive, lv_online) in lvs.iteritems():
1067 b63ed789 Iustin Pop
        inst = nv_dict.pop((node, lv_name), None)
1068 b63ed789 Iustin Pop
        if (not lv_online and inst is not None
1069 b63ed789 Iustin Pop
            and inst.name not in res_instances):
1070 b08d5a87 Iustin Pop
          res_instances.append(inst.name)
1071 2c95a8d4 Iustin Pop
1072 b63ed789 Iustin Pop
    # any leftover items in nv_dict are missing LVs, let's arrange the
1073 b63ed789 Iustin Pop
    # data better
1074 b63ed789 Iustin Pop
    for key, inst in nv_dict.iteritems():
1075 b63ed789 Iustin Pop
      if inst.name not in res_missing:
1076 b63ed789 Iustin Pop
        res_missing[inst.name] = []
1077 b63ed789 Iustin Pop
      res_missing[inst.name].append(key)
1078 b63ed789 Iustin Pop
1079 2c95a8d4 Iustin Pop
    return result
1080 2c95a8d4 Iustin Pop
1081 2c95a8d4 Iustin Pop
1082 07bd8a51 Iustin Pop
class LURenameCluster(LogicalUnit):
1083 07bd8a51 Iustin Pop
  """Rename the cluster.
1084 07bd8a51 Iustin Pop

1085 07bd8a51 Iustin Pop
  """
1086 07bd8a51 Iustin Pop
  HPATH = "cluster-rename"
1087 07bd8a51 Iustin Pop
  HTYPE = constants.HTYPE_CLUSTER
1088 07bd8a51 Iustin Pop
  _OP_REQP = ["name"]
1089 07bd8a51 Iustin Pop
1090 07bd8a51 Iustin Pop
  def BuildHooksEnv(self):
1091 07bd8a51 Iustin Pop
    """Build hooks env.
1092 07bd8a51 Iustin Pop

1093 07bd8a51 Iustin Pop
    """
1094 07bd8a51 Iustin Pop
    env = {
1095 488b540d Iustin Pop
      "OP_TARGET": self.sstore.GetClusterName(),
1096 07bd8a51 Iustin Pop
      "NEW_NAME": self.op.name,
1097 07bd8a51 Iustin Pop
      }
1098 07bd8a51 Iustin Pop
    mn = self.sstore.GetMasterNode()
1099 07bd8a51 Iustin Pop
    return env, [mn], [mn]
1100 07bd8a51 Iustin Pop
1101 07bd8a51 Iustin Pop
  def CheckPrereq(self):
1102 07bd8a51 Iustin Pop
    """Verify that the passed name is a valid one.
1103 07bd8a51 Iustin Pop

1104 07bd8a51 Iustin Pop
    """
1105 89e1fc26 Iustin Pop
    hostname = utils.HostInfo(self.op.name)
1106 07bd8a51 Iustin Pop
1107 bcf043c9 Iustin Pop
    new_name = hostname.name
1108 bcf043c9 Iustin Pop
    self.ip = new_ip = hostname.ip
1109 07bd8a51 Iustin Pop
    old_name = self.sstore.GetClusterName()
1110 07bd8a51 Iustin Pop
    old_ip = self.sstore.GetMasterIP()
1111 07bd8a51 Iustin Pop
    if new_name == old_name and new_ip == old_ip:
1112 07bd8a51 Iustin Pop
      raise errors.OpPrereqError("Neither the name nor the IP address of the"
1113 07bd8a51 Iustin Pop
                                 " cluster has changed")
1114 07bd8a51 Iustin Pop
    if new_ip != old_ip:
1115 07bd8a51 Iustin Pop
      result = utils.RunCmd(["fping", "-q", new_ip])
1116 07bd8a51 Iustin Pop
      if not result.failed:
1117 07bd8a51 Iustin Pop
        raise errors.OpPrereqError("The given cluster IP address (%s) is"
1118 07bd8a51 Iustin Pop
                                   " reachable on the network. Aborting." %
1119 07bd8a51 Iustin Pop
                                   new_ip)
1120 07bd8a51 Iustin Pop
1121 07bd8a51 Iustin Pop
    self.op.name = new_name
1122 07bd8a51 Iustin Pop
1123 07bd8a51 Iustin Pop
  def Exec(self, feedback_fn):
1124 07bd8a51 Iustin Pop
    """Rename the cluster.
1125 07bd8a51 Iustin Pop

1126 07bd8a51 Iustin Pop
    """
1127 07bd8a51 Iustin Pop
    clustername = self.op.name
1128 07bd8a51 Iustin Pop
    ip = self.ip
1129 07bd8a51 Iustin Pop
    ss = self.sstore
1130 07bd8a51 Iustin Pop
1131 07bd8a51 Iustin Pop
    # shutdown the master IP
1132 07bd8a51 Iustin Pop
    master = ss.GetMasterNode()
1133 07bd8a51 Iustin Pop
    if not rpc.call_node_stop_master(master):
1134 07bd8a51 Iustin Pop
      raise errors.OpExecError("Could not disable the master role")
1135 07bd8a51 Iustin Pop
1136 07bd8a51 Iustin Pop
    try:
1137 07bd8a51 Iustin Pop
      # modify the sstore
1138 07bd8a51 Iustin Pop
      ss.SetKey(ss.SS_MASTER_IP, ip)
1139 07bd8a51 Iustin Pop
      ss.SetKey(ss.SS_CLUSTER_NAME, clustername)
1140 07bd8a51 Iustin Pop
1141 07bd8a51 Iustin Pop
      # Distribute updated ss config to all nodes
1142 07bd8a51 Iustin Pop
      myself = self.cfg.GetNodeInfo(master)
1143 07bd8a51 Iustin Pop
      dist_nodes = self.cfg.GetNodeList()
1144 07bd8a51 Iustin Pop
      if myself.name in dist_nodes:
1145 07bd8a51 Iustin Pop
        dist_nodes.remove(myself.name)
1146 07bd8a51 Iustin Pop
1147 07bd8a51 Iustin Pop
      logger.Debug("Copying updated ssconf data to all nodes")
1148 07bd8a51 Iustin Pop
      for keyname in [ss.SS_CLUSTER_NAME, ss.SS_MASTER_IP]:
1149 07bd8a51 Iustin Pop
        fname = ss.KeyToFilename(keyname)
1150 07bd8a51 Iustin Pop
        result = rpc.call_upload_file(dist_nodes, fname)
1151 07bd8a51 Iustin Pop
        for to_node in dist_nodes:
1152 07bd8a51 Iustin Pop
          if not result[to_node]:
1153 07bd8a51 Iustin Pop
            logger.Error("copy of file %s to node %s failed" %
1154 07bd8a51 Iustin Pop
                         (fname, to_node))
1155 07bd8a51 Iustin Pop
    finally:
1156 07bd8a51 Iustin Pop
      if not rpc.call_node_start_master(master):
1157 f4bc1f2c Michael Hanselmann
        logger.Error("Could not re-enable the master role on the master,"
1158 f4bc1f2c Michael Hanselmann
                     " please restart manually.")
1159 07bd8a51 Iustin Pop
1160 07bd8a51 Iustin Pop
1161 8084f9f6 Manuel Franceschini
def _RecursiveCheckIfLVMBased(disk):
1162 8084f9f6 Manuel Franceschini
  """Check if the given disk or its children are lvm-based.
1163 8084f9f6 Manuel Franceschini

1164 8084f9f6 Manuel Franceschini
  Args:
1165 8084f9f6 Manuel Franceschini
    disk: ganeti.objects.Disk object
1166 8084f9f6 Manuel Franceschini

1167 8084f9f6 Manuel Franceschini
  Returns:
1168 8084f9f6 Manuel Franceschini
    boolean indicating whether an LD_LV dev_type was found or not
1169 8084f9f6 Manuel Franceschini

1170 8084f9f6 Manuel Franceschini
  """
1171 8084f9f6 Manuel Franceschini
  if disk.children:
1172 8084f9f6 Manuel Franceschini
    for chdisk in disk.children:
1173 8084f9f6 Manuel Franceschini
      if _RecursiveCheckIfLVMBased(chdisk):
1174 8084f9f6 Manuel Franceschini
        return True
1175 8084f9f6 Manuel Franceschini
  return disk.dev_type == constants.LD_LV
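
# Illustrative sketch (not used by any LU) of how _RecursiveCheckIfLVMBased
# walks a disk tree; the objects.Disk arguments below are assumptions made
# only for this example:
#
#   lv_a = objects.Disk(dev_type=constants.LD_LV, size=1024, children=[])
#   lv_b = objects.Disk(dev_type=constants.LD_LV, size=1024, children=[])
#   mirror = objects.Disk(dev_type=constants.LD_DRBD8, size=1024,
#                         children=[lv_a, lv_b])
#   _RecursiveCheckIfLVMBased(mirror)  # -> True, found via the LV children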
1176 8084f9f6 Manuel Franceschini
1177 8084f9f6 Manuel Franceschini
1178 8084f9f6 Manuel Franceschini
class LUSetClusterParams(LogicalUnit):
1179 8084f9f6 Manuel Franceschini
  """Change the parameters of the cluster.
1180 8084f9f6 Manuel Franceschini

1181 8084f9f6 Manuel Franceschini
  """
1182 8084f9f6 Manuel Franceschini
  HPATH = "cluster-modify"
1183 8084f9f6 Manuel Franceschini
  HTYPE = constants.HTYPE_CLUSTER
1184 8084f9f6 Manuel Franceschini
  _OP_REQP = []
1185 8084f9f6 Manuel Franceschini
1186 8084f9f6 Manuel Franceschini
  def BuildHooksEnv(self):
1187 8084f9f6 Manuel Franceschini
    """Build hooks env.
1188 8084f9f6 Manuel Franceschini

1189 8084f9f6 Manuel Franceschini
    """
1190 8084f9f6 Manuel Franceschini
    env = {
1191 8084f9f6 Manuel Franceschini
      "OP_TARGET": self.sstore.GetClusterName(),
1192 8084f9f6 Manuel Franceschini
      "NEW_VG_NAME": self.op.vg_name,
1193 8084f9f6 Manuel Franceschini
      }
1194 8084f9f6 Manuel Franceschini
    mn = self.sstore.GetMasterNode()
1195 8084f9f6 Manuel Franceschini
    return env, [mn], [mn]
1196 8084f9f6 Manuel Franceschini
1197 8084f9f6 Manuel Franceschini
  def CheckPrereq(self):
1198 8084f9f6 Manuel Franceschini
    """Check prerequisites.
1199 8084f9f6 Manuel Franceschini

1200 8084f9f6 Manuel Franceschini
    This checks whether the given params don't conflict and
1201 5f83e263 Iustin Pop
    whether the given volume group is valid.
1202 8084f9f6 Manuel Franceschini

1203 8084f9f6 Manuel Franceschini
    """
1204 8084f9f6 Manuel Franceschini
    if not self.op.vg_name:
1205 8084f9f6 Manuel Franceschini
      instances = [self.cfg.GetInstanceInfo(name)
1206 8084f9f6 Manuel Franceschini
                   for name in self.cfg.GetInstanceList()]
1207 8084f9f6 Manuel Franceschini
      for inst in instances:
1208 8084f9f6 Manuel Franceschini
        for disk in inst.disks:
1209 8084f9f6 Manuel Franceschini
          if _RecursiveCheckIfLVMBased(disk):
1210 8084f9f6 Manuel Franceschini
            raise errors.OpPrereqError("Cannot disable lvm storage while"
1211 8084f9f6 Manuel Franceschini
                                       " lvm-based instances exist")
1212 8084f9f6 Manuel Franceschini
1213 8084f9f6 Manuel Franceschini
    # if vg_name not None, checks given volume group on all nodes
1214 8084f9f6 Manuel Franceschini
    if self.op.vg_name:
1215 8084f9f6 Manuel Franceschini
      node_list = self.cfg.GetNodeList()
1216 8084f9f6 Manuel Franceschini
      vglist = rpc.call_vg_list(node_list)
1217 8084f9f6 Manuel Franceschini
      for node in node_list:
1218 8084f9f6 Manuel Franceschini
        vgstatus = _HasValidVG(vglist[node], self.op.vg_name)
1219 8084f9f6 Manuel Franceschini
        if vgstatus:
1220 8084f9f6 Manuel Franceschini
          raise errors.OpPrereqError("Error on node '%s': %s" %
1221 8084f9f6 Manuel Franceschini
                                     (node, vgstatus))
1222 8084f9f6 Manuel Franceschini
1223 8084f9f6 Manuel Franceschini
  def Exec(self, feedback_fn):
1224 8084f9f6 Manuel Franceschini
    """Change the parameters of the cluster.
1225 8084f9f6 Manuel Franceschini

1226 8084f9f6 Manuel Franceschini
    """
1227 8084f9f6 Manuel Franceschini
    if self.op.vg_name != self.cfg.GetVGName():
1228 8084f9f6 Manuel Franceschini
      self.cfg.SetVGName(self.op.vg_name)
1229 8084f9f6 Manuel Franceschini
    else:
1230 8084f9f6 Manuel Franceschini
      feedback_fn("Cluster LVM configuration already in desired"
1231 8084f9f6 Manuel Franceschini
                  " state, not changing")
1232 8084f9f6 Manuel Franceschini
1233 8084f9f6 Manuel Franceschini
1234 5bfac263 Iustin Pop
def _WaitForSync(cfgw, instance, proc, oneshot=False, unlock=False):
1235 a8083063 Iustin Pop
  """Sleep and poll for an instance's disk to sync.
1236 a8083063 Iustin Pop

1237 a8083063 Iustin Pop
  """
1238 a8083063 Iustin Pop
  if not instance.disks:
1239 a8083063 Iustin Pop
    return True
1240 a8083063 Iustin Pop
1241 a8083063 Iustin Pop
  if not oneshot:
1242 5bfac263 Iustin Pop
    proc.LogInfo("Waiting for instance %s to sync disks." % instance.name)
1243 a8083063 Iustin Pop
1244 a8083063 Iustin Pop
  node = instance.primary_node
1245 a8083063 Iustin Pop
1246 a8083063 Iustin Pop
  for dev in instance.disks:
1247 a8083063 Iustin Pop
    cfgw.SetDiskID(dev, node)
1248 a8083063 Iustin Pop
1249 a8083063 Iustin Pop
  retries = 0
1250 a8083063 Iustin Pop
  while True:
1251 a8083063 Iustin Pop
    max_time = 0
1252 a8083063 Iustin Pop
    done = True
1253 a8083063 Iustin Pop
    cumul_degraded = False
1254 a8083063 Iustin Pop
    rstats = rpc.call_blockdev_getmirrorstatus(node, instance.disks)
1255 a8083063 Iustin Pop
    if not rstats:
1256 5bfac263 Iustin Pop
      proc.LogWarning("Can't get any data from node %s" % node)
1257 a8083063 Iustin Pop
      retries += 1
1258 a8083063 Iustin Pop
      if retries >= 10:
1259 3ecf6786 Iustin Pop
        raise errors.RemoteError("Can't contact node %s for mirror data,"
1260 3ecf6786 Iustin Pop
                                 " aborting." % node)
1261 a8083063 Iustin Pop
      time.sleep(6)
1262 a8083063 Iustin Pop
      continue
1263 a8083063 Iustin Pop
    retries = 0
1264 a8083063 Iustin Pop
    for i in range(len(rstats)):
1265 a8083063 Iustin Pop
      mstat = rstats[i]
1266 a8083063 Iustin Pop
      if mstat is None:
1267 5bfac263 Iustin Pop
        proc.LogWarning("Can't compute data for node %s/%s" %
1268 a8083063 Iustin Pop
                        (node, instance.disks[i].iv_name))
1269 a8083063 Iustin Pop
        continue
1270 0834c866 Iustin Pop
      # we ignore the ldisk parameter
1271 0834c866 Iustin Pop
      perc_done, est_time, is_degraded, _ = mstat
1272 a8083063 Iustin Pop
      cumul_degraded = cumul_degraded or (is_degraded and perc_done is None)
1273 a8083063 Iustin Pop
      if perc_done is not None:
1274 a8083063 Iustin Pop
        done = False
1275 a8083063 Iustin Pop
        if est_time is not None:
1276 a8083063 Iustin Pop
          rem_time = "%d estimated seconds remaining" % est_time
1277 a8083063 Iustin Pop
          max_time = est_time
1278 a8083063 Iustin Pop
        else:
1279 a8083063 Iustin Pop
          rem_time = "no time estimate"
1280 5bfac263 Iustin Pop
        proc.LogInfo("- device %s: %5.2f%% done, %s" %
1281 5bfac263 Iustin Pop
                     (instance.disks[i].iv_name, perc_done, rem_time))
1282 a8083063 Iustin Pop
    if done or oneshot:
1283 a8083063 Iustin Pop
      break
1284 a8083063 Iustin Pop
1285 a8083063 Iustin Pop
    if unlock:
1286 685ee993 Iustin Pop
      #utils.Unlock('cmd')
1287 685ee993 Iustin Pop
      pass
1288 a8083063 Iustin Pop
    try:
1289 a8083063 Iustin Pop
      time.sleep(min(60, max_time))
1290 a8083063 Iustin Pop
    finally:
1291 a8083063 Iustin Pop
      if unlock:
1292 685ee993 Iustin Pop
        #utils.Lock('cmd')
1293 685ee993 Iustin Pop
        pass
1294 a8083063 Iustin Pop
1295 a8083063 Iustin Pop
  if done:
1296 5bfac263 Iustin Pop
    proc.LogInfo("Instance %s's disks are in sync." % instance.name)
1297 a8083063 Iustin Pop
  return not cumul_degraded
1298 a8083063 Iustin Pop
1299 a8083063 Iustin Pop
1300 0834c866 Iustin Pop
def _CheckDiskConsistency(cfgw, dev, node, on_primary, ldisk=False):
1301 a8083063 Iustin Pop
  """Check that mirrors are not degraded.
1302 a8083063 Iustin Pop

1303 0834c866 Iustin Pop
  The ldisk parameter, if True, will change the test from the
1304 0834c866 Iustin Pop
  is_degraded attribute (which represents overall non-ok status for
1305 0834c866 Iustin Pop
  the device(s)) to the ldisk (representing the local storage status).
1306 0834c866 Iustin Pop

1307 a8083063 Iustin Pop
  """
1308 a8083063 Iustin Pop
  cfgw.SetDiskID(dev, node)
1309 0834c866 Iustin Pop
  if ldisk:
1310 0834c866 Iustin Pop
    idx = 6
1311 0834c866 Iustin Pop
  else:
1312 0834c866 Iustin Pop
    idx = 5
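  # idx selects which field of the remote blockdev_find reply is tested
  # below: the overall is_degraded flag, or the local-storage (ldisk) flag
  # when ldisk=True (field positions assumed from the rpc reply layout)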
1313 a8083063 Iustin Pop
1314 a8083063 Iustin Pop
  result = True
1315 a8083063 Iustin Pop
  if on_primary or dev.AssembleOnSecondary():
1316 a8083063 Iustin Pop
    rstats = rpc.call_blockdev_find(node, dev)
1317 a8083063 Iustin Pop
    if not rstats:
1318 aa9d0c32 Guido Trotter
      logger.ToStderr("Node %s: Disk degraded, not found or node down" % node)
1319 a8083063 Iustin Pop
      result = False
1320 a8083063 Iustin Pop
    else:
1321 0834c866 Iustin Pop
      result = result and (not rstats[idx])
1322 a8083063 Iustin Pop
  if dev.children:
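    # note: the ldisk flag is not propagated to the children, which are
    # checked with the default is_degraded test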
1323 a8083063 Iustin Pop
    for child in dev.children:
1324 a8083063 Iustin Pop
      result = result and _CheckDiskConsistency(cfgw, child, node, on_primary)
1325 a8083063 Iustin Pop
1326 a8083063 Iustin Pop
  return result
1327 a8083063 Iustin Pop
1328 a8083063 Iustin Pop
1329 a8083063 Iustin Pop
class LUDiagnoseOS(NoHooksLU):
1330 a8083063 Iustin Pop
  """Logical unit for OS diagnose/query.
1331 a8083063 Iustin Pop

1332 a8083063 Iustin Pop
  """
1333 1f9430d6 Iustin Pop
  _OP_REQP = ["output_fields", "names"]
1334 a8083063 Iustin Pop
1335 a8083063 Iustin Pop
  def CheckPrereq(self):
1336 a8083063 Iustin Pop
    """Check prerequisites.
1337 a8083063 Iustin Pop

1338 a8083063 Iustin Pop
    This always succeeds, since this is a pure query LU.
1339 a8083063 Iustin Pop

1340 a8083063 Iustin Pop
    """
1341 1f9430d6 Iustin Pop
    if self.op.names:
1342 1f9430d6 Iustin Pop
      raise errors.OpPrereqError("Selective OS query not supported")
1343 1f9430d6 Iustin Pop
1344 1f9430d6 Iustin Pop
    self.dynamic_fields = frozenset(["name", "valid", "node_status"])
1345 1f9430d6 Iustin Pop
    _CheckOutputFields(static=[],
1346 1f9430d6 Iustin Pop
                       dynamic=self.dynamic_fields,
1347 1f9430d6 Iustin Pop
                       selected=self.op.output_fields)
1348 1f9430d6 Iustin Pop
1349 1f9430d6 Iustin Pop
  @staticmethod
1350 1f9430d6 Iustin Pop
  def _DiagnoseByOS(node_list, rlist):
1351 1f9430d6 Iustin Pop
    """Remaps a per-node return list into an a per-os per-node dictionary
1352 1f9430d6 Iustin Pop

1353 1f9430d6 Iustin Pop
      Args:
1354 1f9430d6 Iustin Pop
        node_list: a list with the names of all nodes
1355 1f9430d6 Iustin Pop
        rlist: a map with node names as keys and OS objects as values
1356 1f9430d6 Iustin Pop

1357 1f9430d6 Iustin Pop
      Returns:
1358 1f9430d6 Iustin Pop
        map: a map with osnames as keys and as value another map, with
1359 1f9430d6 Iustin Pop
             nodes as keys and lists of OS objects as values
1361 1f9430d6 Iustin Pop
             e.g. {"debian-etch": {"node1": [<object>,...],
1362 1f9430d6 Iustin Pop
                                   "node2": [<object>,]}
1363 1f9430d6 Iustin Pop
                  }
1364 1f9430d6 Iustin Pop

1365 1f9430d6 Iustin Pop
    """
1366 1f9430d6 Iustin Pop
    all_os = {}
1367 1f9430d6 Iustin Pop
    for node_name, nr in rlist.iteritems():
1368 1f9430d6 Iustin Pop
      if not nr:
1369 1f9430d6 Iustin Pop
        continue
1370 b4de68a9 Iustin Pop
      for os_obj in nr:
1371 b4de68a9 Iustin Pop
        if os_obj.name not in all_os:
1372 1f9430d6 Iustin Pop
          # build a list of nodes for this os containing empty lists
1373 1f9430d6 Iustin Pop
          # for each node in node_list
1374 b4de68a9 Iustin Pop
          all_os[os_obj.name] = {}
1375 1f9430d6 Iustin Pop
          for nname in node_list:
1376 b4de68a9 Iustin Pop
            all_os[os_obj.name][nname] = []
1377 b4de68a9 Iustin Pop
        all_os[os_obj.name][node_name].append(os_obj)
1378 1f9430d6 Iustin Pop
    return all_os
1379 a8083063 Iustin Pop
1380 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
1381 a8083063 Iustin Pop
    """Compute the list of OSes.
1382 a8083063 Iustin Pop

1383 a8083063 Iustin Pop
    """
1384 a8083063 Iustin Pop
    node_list = self.cfg.GetNodeList()
1385 a8083063 Iustin Pop
    node_data = rpc.call_os_diagnose(node_list)
1386 a8083063 Iustin Pop
    if node_data == False:
1387 3ecf6786 Iustin Pop
      raise errors.OpExecError("Can't gather the list of OSes")
1388 1f9430d6 Iustin Pop
    pol = self._DiagnoseByOS(node_list, node_data)
1389 1f9430d6 Iustin Pop
    output = []
1390 1f9430d6 Iustin Pop
    for os_name, os_data in pol.iteritems():
1391 1f9430d6 Iustin Pop
      row = []
1392 1f9430d6 Iustin Pop
      for field in self.op.output_fields:
1393 1f9430d6 Iustin Pop
        if field == "name":
1394 1f9430d6 Iustin Pop
          val = os_name
1395 1f9430d6 Iustin Pop
        elif field == "valid":
1396 1f9430d6 Iustin Pop
          val = utils.all([osl and osl[0] for osl in os_data.values()])
1397 1f9430d6 Iustin Pop
        elif field == "node_status":
1398 1f9430d6 Iustin Pop
          val = {}
1399 1f9430d6 Iustin Pop
          for node_name, nos_list in os_data.iteritems():
1400 1f9430d6 Iustin Pop
            val[node_name] = [(v.status, v.path) for v in nos_list]
1401 1f9430d6 Iustin Pop
        else:
1402 1f9430d6 Iustin Pop
          raise errors.ParameterError(field)
1403 1f9430d6 Iustin Pop
        row.append(val)
1404 1f9430d6 Iustin Pop
      output.append(row)
1405 1f9430d6 Iustin Pop
1406 1f9430d6 Iustin Pop
    return output
1407 a8083063 Iustin Pop
1408 a8083063 Iustin Pop
1409 a8083063 Iustin Pop
class LURemoveNode(LogicalUnit):
1410 a8083063 Iustin Pop
  """Logical unit for removing a node.
1411 a8083063 Iustin Pop

1412 a8083063 Iustin Pop
  """
1413 a8083063 Iustin Pop
  HPATH = "node-remove"
1414 a8083063 Iustin Pop
  HTYPE = constants.HTYPE_NODE
1415 a8083063 Iustin Pop
  _OP_REQP = ["node_name"]
1416 a8083063 Iustin Pop
1417 a8083063 Iustin Pop
  def BuildHooksEnv(self):
1418 a8083063 Iustin Pop
    """Build hooks env.
1419 a8083063 Iustin Pop

1420 a8083063 Iustin Pop
    This doesn't run on the target node in the pre phase as a failed
1421 a8083063 Iustin Pop
    node would not allow itself to run.
1422 a8083063 Iustin Pop

1423 a8083063 Iustin Pop
    """
1424 396e1b78 Michael Hanselmann
    env = {
1425 0e137c28 Iustin Pop
      "OP_TARGET": self.op.node_name,
1426 396e1b78 Michael Hanselmann
      "NODE_NAME": self.op.node_name,
1427 396e1b78 Michael Hanselmann
      }
1428 a8083063 Iustin Pop
    all_nodes = self.cfg.GetNodeList()
1429 a8083063 Iustin Pop
    all_nodes.remove(self.op.node_name)
1430 396e1b78 Michael Hanselmann
    return env, all_nodes, all_nodes
1431 a8083063 Iustin Pop
1432 a8083063 Iustin Pop
  def CheckPrereq(self):
1433 a8083063 Iustin Pop
    """Check prerequisites.
1434 a8083063 Iustin Pop

1435 a8083063 Iustin Pop
    This checks:
1436 a8083063 Iustin Pop
     - the node exists in the configuration
1437 a8083063 Iustin Pop
     - it does not have primary or secondary instances
1438 a8083063 Iustin Pop
     - it's not the master
1439 a8083063 Iustin Pop

1440 a8083063 Iustin Pop
    Any errors are signalled by raising errors.OpPrereqError.
1441 a8083063 Iustin Pop

1442 a8083063 Iustin Pop
    """
1443 a8083063 Iustin Pop
    node = self.cfg.GetNodeInfo(self.cfg.ExpandNodeName(self.op.node_name))
1444 a8083063 Iustin Pop
    if node is None:
1445 a02bc76e Iustin Pop
      raise errors.OpPrereqError("Node '%s' is unknown." % self.op.node_name)
1446 a8083063 Iustin Pop
1447 a8083063 Iustin Pop
    instance_list = self.cfg.GetInstanceList()
1448 a8083063 Iustin Pop
1449 880478f8 Iustin Pop
    masternode = self.sstore.GetMasterNode()
1450 a8083063 Iustin Pop
    if node.name == masternode:
1451 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("Node is the master node,"
1452 3ecf6786 Iustin Pop
                                 " you need to failover first.")
1453 a8083063 Iustin Pop
1454 a8083063 Iustin Pop
    for instance_name in instance_list:
1455 a8083063 Iustin Pop
      instance = self.cfg.GetInstanceInfo(instance_name)
1456 a8083063 Iustin Pop
      if node.name == instance.primary_node:
1457 3ecf6786 Iustin Pop
        raise errors.OpPrereqError("Instance %s still running on the node,"
1458 3ecf6786 Iustin Pop
                                   " please remove first." % instance_name)
1459 a8083063 Iustin Pop
      if node.name in instance.secondary_nodes:
1460 3ecf6786 Iustin Pop
        raise errors.OpPrereqError("Instance %s has node as a secondary,"
1461 3ecf6786 Iustin Pop
                                   " please remove first." % instance_name)
1462 a8083063 Iustin Pop
    self.op.node_name = node.name
1463 a8083063 Iustin Pop
    self.node = node
1464 a8083063 Iustin Pop
1465 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
1466 a8083063 Iustin Pop
    """Removes the node from the cluster.
1467 a8083063 Iustin Pop

1468 a8083063 Iustin Pop
    """
1469 a8083063 Iustin Pop
    node = self.node
1470 a8083063 Iustin Pop
    logger.Info("stopping the node daemon and removing configs from node %s" %
1471 a8083063 Iustin Pop
                node.name)
1472 a8083063 Iustin Pop
1473 a8083063 Iustin Pop
    rpc.call_node_leave_cluster(node.name)
1474 a8083063 Iustin Pop
1475 c92b310a Michael Hanselmann
    self.ssh.Run(node.name, 'root', "%s stop" % constants.NODE_INITD_SCRIPT)
1476 a8083063 Iustin Pop
1477 a8083063 Iustin Pop
    logger.Info("Removing node %s from config" % node.name)
1478 a8083063 Iustin Pop
1479 a8083063 Iustin Pop
    self.cfg.RemoveNode(node.name)
1480 a8083063 Iustin Pop
1481 c8a0948f Michael Hanselmann
    _RemoveHostFromEtcHosts(node.name)
1482 c8a0948f Michael Hanselmann
1483 a8083063 Iustin Pop
1484 a8083063 Iustin Pop
class LUQueryNodes(NoHooksLU):
1485 a8083063 Iustin Pop
  """Logical unit for querying nodes.
1486 a8083063 Iustin Pop

1487 a8083063 Iustin Pop
  """
1488 246e180a Iustin Pop
  _OP_REQP = ["output_fields", "names"]
1489 a8083063 Iustin Pop
1490 a8083063 Iustin Pop
  def CheckPrereq(self):
1491 a8083063 Iustin Pop
    """Check prerequisites.
1492 a8083063 Iustin Pop

1493 a8083063 Iustin Pop
    This checks that the fields required are valid output fields.
1494 a8083063 Iustin Pop

1495 a8083063 Iustin Pop
    """
1496 e8a4c138 Iustin Pop
    self.dynamic_fields = frozenset([
1497 e8a4c138 Iustin Pop
      "dtotal", "dfree",
1498 e8a4c138 Iustin Pop
      "mtotal", "mnode", "mfree",
1499 e8a4c138 Iustin Pop
      "bootid",
1500 e8a4c138 Iustin Pop
      "ctotal",
1501 e8a4c138 Iustin Pop
      ])
1502 a8083063 Iustin Pop
1503 ec223efb Iustin Pop
    _CheckOutputFields(static=["name", "pinst_cnt", "sinst_cnt",
1504 ec223efb Iustin Pop
                               "pinst_list", "sinst_list",
1505 ec223efb Iustin Pop
                               "pip", "sip"],
1506 dcb93971 Michael Hanselmann
                       dynamic=self.dynamic_fields,
1507 dcb93971 Michael Hanselmann
                       selected=self.op.output_fields)
1508 a8083063 Iustin Pop
1509 246e180a Iustin Pop
    self.wanted = _GetWantedNodes(self, self.op.names)
1510 a8083063 Iustin Pop
1511 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
1512 a8083063 Iustin Pop
    """Computes the list of nodes and their attributes.
1513 a8083063 Iustin Pop

1514 a8083063 Iustin Pop
    """
1515 246e180a Iustin Pop
    nodenames = self.wanted
1516 a8083063 Iustin Pop
    nodelist = [self.cfg.GetNodeInfo(name) for name in nodenames]
1517 a8083063 Iustin Pop
1518 a8083063 Iustin Pop
    # begin data gathering
1519 a8083063 Iustin Pop
1520 a8083063 Iustin Pop
    if self.dynamic_fields.intersection(self.op.output_fields):
1521 a8083063 Iustin Pop
      live_data = {}
1522 a8083063 Iustin Pop
      node_data = rpc.call_node_info(nodenames, self.cfg.GetVGName())
1523 a8083063 Iustin Pop
      for name in nodenames:
1524 a8083063 Iustin Pop
        nodeinfo = node_data.get(name, None)
1525 a8083063 Iustin Pop
        if nodeinfo:
1526 a8083063 Iustin Pop
          live_data[name] = {
1527 a8083063 Iustin Pop
            "mtotal": utils.TryConvert(int, nodeinfo['memory_total']),
1528 a8083063 Iustin Pop
            "mnode": utils.TryConvert(int, nodeinfo['memory_dom0']),
1529 a8083063 Iustin Pop
            "mfree": utils.TryConvert(int, nodeinfo['memory_free']),
1530 a8083063 Iustin Pop
            "dtotal": utils.TryConvert(int, nodeinfo['vg_size']),
1531 a8083063 Iustin Pop
            "dfree": utils.TryConvert(int, nodeinfo['vg_free']),
1532 e8a4c138 Iustin Pop
            "ctotal": utils.TryConvert(int, nodeinfo['cpu_total']),
1533 3ef10550 Michael Hanselmann
            "bootid": nodeinfo['bootid'],
1534 a8083063 Iustin Pop
            }
1535 a8083063 Iustin Pop
        else:
1536 a8083063 Iustin Pop
          live_data[name] = {}
1537 a8083063 Iustin Pop
    else:
1538 a8083063 Iustin Pop
      live_data = dict.fromkeys(nodenames, {})
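      # note: fromkeys makes every node share the same empty dict, which is
      # fine here because live_data is only read below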
1539 a8083063 Iustin Pop
1540 ec223efb Iustin Pop
    node_to_primary = dict([(name, set()) for name in nodenames])
1541 ec223efb Iustin Pop
    node_to_secondary = dict([(name, set()) for name in nodenames])
1542 a8083063 Iustin Pop
1543 ec223efb Iustin Pop
    inst_fields = frozenset(("pinst_cnt", "pinst_list",
1544 ec223efb Iustin Pop
                             "sinst_cnt", "sinst_list"))
1545 ec223efb Iustin Pop
    if inst_fields & frozenset(self.op.output_fields):
1546 a8083063 Iustin Pop
      instancelist = self.cfg.GetInstanceList()
1547 a8083063 Iustin Pop
1548 ec223efb Iustin Pop
      for instance_name in instancelist:
1549 ec223efb Iustin Pop
        inst = self.cfg.GetInstanceInfo(instance_name)
1550 ec223efb Iustin Pop
        if inst.primary_node in node_to_primary:
1551 ec223efb Iustin Pop
          node_to_primary[inst.primary_node].add(inst.name)
1552 ec223efb Iustin Pop
        for secnode in inst.secondary_nodes:
1553 ec223efb Iustin Pop
          if secnode in node_to_secondary:
1554 ec223efb Iustin Pop
            node_to_secondary[secnode].add(inst.name)
1555 a8083063 Iustin Pop
1556 a8083063 Iustin Pop
    # end data gathering
1557 a8083063 Iustin Pop
1558 a8083063 Iustin Pop
    output = []
1559 a8083063 Iustin Pop
    for node in nodelist:
1560 a8083063 Iustin Pop
      node_output = []
1561 a8083063 Iustin Pop
      for field in self.op.output_fields:
1562 a8083063 Iustin Pop
        if field == "name":
1563 a8083063 Iustin Pop
          val = node.name
1564 ec223efb Iustin Pop
        elif field == "pinst_list":
1565 ec223efb Iustin Pop
          val = list(node_to_primary[node.name])
1566 ec223efb Iustin Pop
        elif field == "sinst_list":
1567 ec223efb Iustin Pop
          val = list(node_to_secondary[node.name])
1568 ec223efb Iustin Pop
        elif field == "pinst_cnt":
1569 ec223efb Iustin Pop
          val = len(node_to_primary[node.name])
1570 ec223efb Iustin Pop
        elif field == "sinst_cnt":
1571 ec223efb Iustin Pop
          val = len(node_to_secondary[node.name])
1572 a8083063 Iustin Pop
        elif field == "pip":
1573 a8083063 Iustin Pop
          val = node.primary_ip
1574 a8083063 Iustin Pop
        elif field == "sip":
1575 a8083063 Iustin Pop
          val = node.secondary_ip
1576 a8083063 Iustin Pop
        elif field in self.dynamic_fields:
1577 ec223efb Iustin Pop
          val = live_data[node.name].get(field, None)
1578 a8083063 Iustin Pop
        else:
1579 3ecf6786 Iustin Pop
          raise errors.ParameterError(field)
1580 a8083063 Iustin Pop
        node_output.append(val)
1581 a8083063 Iustin Pop
      output.append(node_output)
1582 a8083063 Iustin Pop
1583 a8083063 Iustin Pop
    return output
1584 a8083063 Iustin Pop
1585 a8083063 Iustin Pop
1586 dcb93971 Michael Hanselmann
class LUQueryNodeVolumes(NoHooksLU):
1587 dcb93971 Michael Hanselmann
  """Logical unit for getting volumes on node(s).
1588 dcb93971 Michael Hanselmann

1589 dcb93971 Michael Hanselmann
  """
1590 dcb93971 Michael Hanselmann
  _OP_REQP = ["nodes", "output_fields"]
1591 dcb93971 Michael Hanselmann
1592 dcb93971 Michael Hanselmann
  def CheckPrereq(self):
1593 dcb93971 Michael Hanselmann
    """Check prerequisites.
1594 dcb93971 Michael Hanselmann

1595 dcb93971 Michael Hanselmann
    This checks that the fields required are valid output fields.
1596 dcb93971 Michael Hanselmann

1597 dcb93971 Michael Hanselmann
    """
1598 dcb93971 Michael Hanselmann
    self.nodes = _GetWantedNodes(self, self.op.nodes)
1599 dcb93971 Michael Hanselmann
1600 dcb93971 Michael Hanselmann
    _CheckOutputFields(static=["node"],
1601 dcb93971 Michael Hanselmann
                       dynamic=["phys", "vg", "name", "size", "instance"],
1602 dcb93971 Michael Hanselmann
                       selected=self.op.output_fields)
1603 dcb93971 Michael Hanselmann
1604 dcb93971 Michael Hanselmann
1605 dcb93971 Michael Hanselmann
  def Exec(self, feedback_fn):
1606 dcb93971 Michael Hanselmann
    """Computes the list of nodes and their attributes.
1607 dcb93971 Michael Hanselmann

1608 dcb93971 Michael Hanselmann
    """
1609 a7ba5e53 Iustin Pop
    nodenames = self.nodes
1610 dcb93971 Michael Hanselmann
    volumes = rpc.call_node_volumes(nodenames)
1611 dcb93971 Michael Hanselmann
1612 dcb93971 Michael Hanselmann
    ilist = [self.cfg.GetInstanceInfo(iname) for iname
1613 dcb93971 Michael Hanselmann
             in self.cfg.GetInstanceList()]
1614 dcb93971 Michael Hanselmann
1615 dcb93971 Michael Hanselmann
    lv_by_node = dict([(inst, inst.MapLVsByNode()) for inst in ilist])
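    # lv_by_node maps each instance object to the {node_name: [lv names]}
    # dict produced by MapLVsByNode, used for the "instance" field below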
1616 dcb93971 Michael Hanselmann
1617 dcb93971 Michael Hanselmann
    output = []
1618 dcb93971 Michael Hanselmann
    for node in nodenames:
1619 37d19eb2 Michael Hanselmann
      if node not in volumes or not volumes[node]:
1620 37d19eb2 Michael Hanselmann
        continue
1621 37d19eb2 Michael Hanselmann
1622 dcb93971 Michael Hanselmann
      node_vols = volumes[node][:]
1623 dcb93971 Michael Hanselmann
      node_vols.sort(key=lambda vol: vol['dev'])
1624 dcb93971 Michael Hanselmann
1625 dcb93971 Michael Hanselmann
      for vol in node_vols:
1626 dcb93971 Michael Hanselmann
        node_output = []
1627 dcb93971 Michael Hanselmann
        for field in self.op.output_fields:
1628 dcb93971 Michael Hanselmann
          if field == "node":
1629 dcb93971 Michael Hanselmann
            val = node
1630 dcb93971 Michael Hanselmann
          elif field == "phys":
1631 dcb93971 Michael Hanselmann
            val = vol['dev']
1632 dcb93971 Michael Hanselmann
          elif field == "vg":
1633 dcb93971 Michael Hanselmann
            val = vol['vg']
1634 dcb93971 Michael Hanselmann
          elif field == "name":
1635 dcb93971 Michael Hanselmann
            val = vol['name']
1636 dcb93971 Michael Hanselmann
          elif field == "size":
1637 dcb93971 Michael Hanselmann
            val = int(float(vol['size']))
1638 dcb93971 Michael Hanselmann
          elif field == "instance":
1639 dcb93971 Michael Hanselmann
            for inst in ilist:
1640 dcb93971 Michael Hanselmann
              if node not in lv_by_node[inst]:
1641 dcb93971 Michael Hanselmann
                continue
1642 dcb93971 Michael Hanselmann
              if vol['name'] in lv_by_node[inst][node]:
1643 dcb93971 Michael Hanselmann
                val = inst.name
1644 dcb93971 Michael Hanselmann
                break
1645 dcb93971 Michael Hanselmann
            else:
1646 dcb93971 Michael Hanselmann
              val = '-'
1647 dcb93971 Michael Hanselmann
          else:
1648 3ecf6786 Iustin Pop
            raise errors.ParameterError(field)
1649 dcb93971 Michael Hanselmann
          node_output.append(str(val))
1650 dcb93971 Michael Hanselmann
1651 dcb93971 Michael Hanselmann
        output.append(node_output)
1652 dcb93971 Michael Hanselmann
1653 dcb93971 Michael Hanselmann
    return output
1654 dcb93971 Michael Hanselmann
1655 dcb93971 Michael Hanselmann
1656 a8083063 Iustin Pop
class LUAddNode(LogicalUnit):
1657 a8083063 Iustin Pop
  """Logical unit for adding node to the cluster.
1658 a8083063 Iustin Pop

1659 a8083063 Iustin Pop
  """
1660 a8083063 Iustin Pop
  HPATH = "node-add"
1661 a8083063 Iustin Pop
  HTYPE = constants.HTYPE_NODE
1662 a8083063 Iustin Pop
  _OP_REQP = ["node_name"]
1663 a8083063 Iustin Pop
1664 a8083063 Iustin Pop
  def BuildHooksEnv(self):
1665 a8083063 Iustin Pop
    """Build hooks env.
1666 a8083063 Iustin Pop

1667 a8083063 Iustin Pop
    This will run on all nodes before, and on all nodes + the new node after.
1668 a8083063 Iustin Pop

1669 a8083063 Iustin Pop
    """
1670 a8083063 Iustin Pop
    env = {
1671 0e137c28 Iustin Pop
      "OP_TARGET": self.op.node_name,
1672 a8083063 Iustin Pop
      "NODE_NAME": self.op.node_name,
1673 a8083063 Iustin Pop
      "NODE_PIP": self.op.primary_ip,
1674 a8083063 Iustin Pop
      "NODE_SIP": self.op.secondary_ip,
1675 a8083063 Iustin Pop
      }
1676 a8083063 Iustin Pop
    nodes_0 = self.cfg.GetNodeList()
1677 a8083063 Iustin Pop
    nodes_1 = nodes_0 + [self.op.node_name, ]
1678 a8083063 Iustin Pop
    return env, nodes_0, nodes_1
1679 a8083063 Iustin Pop
1680 a8083063 Iustin Pop
  def CheckPrereq(self):
1681 a8083063 Iustin Pop
    """Check prerequisites.
1682 a8083063 Iustin Pop

1683 a8083063 Iustin Pop
    This checks:
1684 a8083063 Iustin Pop
     - the new node is not already in the config
1685 a8083063 Iustin Pop
     - it is resolvable
1686 a8083063 Iustin Pop
     - its parameters (single/dual homed) match the cluster
1687 a8083063 Iustin Pop

1688 a8083063 Iustin Pop
    Any errors are signalled by raising errors.OpPrereqError.
1689 a8083063 Iustin Pop

1690 a8083063 Iustin Pop
    """
1691 a8083063 Iustin Pop
    node_name = self.op.node_name
1692 a8083063 Iustin Pop
    cfg = self.cfg
1693 a8083063 Iustin Pop
1694 89e1fc26 Iustin Pop
    dns_data = utils.HostInfo(node_name)
1695 a8083063 Iustin Pop
1696 bcf043c9 Iustin Pop
    node = dns_data.name
1697 bcf043c9 Iustin Pop
    primary_ip = self.op.primary_ip = dns_data.ip
1698 a8083063 Iustin Pop
    secondary_ip = getattr(self.op, "secondary_ip", None)
1699 a8083063 Iustin Pop
    if secondary_ip is None:
1700 a8083063 Iustin Pop
      secondary_ip = primary_ip
1701 a8083063 Iustin Pop
    if not utils.IsValidIP(secondary_ip):
1702 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("Invalid secondary IP given")
1703 a8083063 Iustin Pop
    self.op.secondary_ip = secondary_ip
1704 e7c6e02b Michael Hanselmann
1705 a8083063 Iustin Pop
    node_list = cfg.GetNodeList()
1706 e7c6e02b Michael Hanselmann
    if not self.op.readd and node in node_list:
1707 e7c6e02b Michael Hanselmann
      raise errors.OpPrereqError("Node %s is already in the configuration" %
1708 e7c6e02b Michael Hanselmann
                                 node)
1709 e7c6e02b Michael Hanselmann
    elif self.op.readd and node not in node_list:
1710 e7c6e02b Michael Hanselmann
      raise errors.OpPrereqError("Node %s is not in the configuration" % node)
1711 a8083063 Iustin Pop
1712 a8083063 Iustin Pop
    for existing_node_name in node_list:
1713 a8083063 Iustin Pop
      existing_node = cfg.GetNodeInfo(existing_node_name)
1714 e7c6e02b Michael Hanselmann
1715 e7c6e02b Michael Hanselmann
      if self.op.readd and node == existing_node_name:
1716 e7c6e02b Michael Hanselmann
        if (existing_node.primary_ip != primary_ip or
1717 e7c6e02b Michael Hanselmann
            existing_node.secondary_ip != secondary_ip):
1718 e7c6e02b Michael Hanselmann
          raise errors.OpPrereqError("Readded node doesn't have the same IP"
1719 e7c6e02b Michael Hanselmann
                                     " address configuration as before")
1720 e7c6e02b Michael Hanselmann
        continue
1721 e7c6e02b Michael Hanselmann
1722 a8083063 Iustin Pop
      if (existing_node.primary_ip == primary_ip or
1723 a8083063 Iustin Pop
          existing_node.secondary_ip == primary_ip or
1724 a8083063 Iustin Pop
          existing_node.primary_ip == secondary_ip or
1725 a8083063 Iustin Pop
          existing_node.secondary_ip == secondary_ip):
1726 3ecf6786 Iustin Pop
        raise errors.OpPrereqError("New node ip address(es) conflict with"
1727 3ecf6786 Iustin Pop
                                   " existing node %s" % existing_node.name)
1728 a8083063 Iustin Pop
1729 a8083063 Iustin Pop
    # check that the type of the node (single versus dual homed) is the
1730 a8083063 Iustin Pop
    # same as for the master
1731 880478f8 Iustin Pop
    myself = cfg.GetNodeInfo(self.sstore.GetMasterNode())
1732 a8083063 Iustin Pop
    master_singlehomed = myself.secondary_ip == myself.primary_ip
1733 a8083063 Iustin Pop
    newbie_singlehomed = secondary_ip == primary_ip
1734 a8083063 Iustin Pop
    if master_singlehomed != newbie_singlehomed:
1735 a8083063 Iustin Pop
      if master_singlehomed:
1736 3ecf6786 Iustin Pop
        raise errors.OpPrereqError("The master has no private ip but the"
1737 3ecf6786 Iustin Pop
                                   " new node has one")
1738 a8083063 Iustin Pop
      else:
1739 3ecf6786 Iustin Pop
        raise errors.OpPrereqError("The master has a private ip but the"
1740 3ecf6786 Iustin Pop
                                   " new node doesn't have one")
1741 a8083063 Iustin Pop
1742 a8083063 Iustin Pop
    # checks reachability
1743 b15d625f Iustin Pop
    if not utils.TcpPing(primary_ip, constants.DEFAULT_NODED_PORT):
1744 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("Node not reachable by ping")
1745 a8083063 Iustin Pop
1746 a8083063 Iustin Pop
    if not newbie_singlehomed:
1747 a8083063 Iustin Pop
      # check reachability from my secondary ip to newbie's secondary ip
1748 b15d625f Iustin Pop
      if not utils.TcpPing(secondary_ip, constants.DEFAULT_NODED_PORT,
1749 b15d625f Iustin Pop
                           source=myself.secondary_ip):
1750 f4bc1f2c Michael Hanselmann
        raise errors.OpPrereqError("Node secondary ip not reachable by TCP"
1751 f4bc1f2c Michael Hanselmann
                                   " based ping to noded port")
1752 a8083063 Iustin Pop
1753 a8083063 Iustin Pop
    self.new_node = objects.Node(name=node,
1754 a8083063 Iustin Pop
                                 primary_ip=primary_ip,
1755 a8083063 Iustin Pop
                                 secondary_ip=secondary_ip)
1756 a8083063 Iustin Pop
1757 2a6469d5 Alexander Schreiber
    if self.sstore.GetHypervisorType() == constants.HT_XEN_HVM31:
1758 2a6469d5 Alexander Schreiber
      if not os.path.exists(constants.VNC_PASSWORD_FILE):
1759 2a6469d5 Alexander Schreiber
        raise errors.OpPrereqError("Cluster VNC password file %s missing" %
1760 2a6469d5 Alexander Schreiber
                                   constants.VNC_PASSWORD_FILE)
1761 2a6469d5 Alexander Schreiber
1762 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
1763 a8083063 Iustin Pop
    """Adds the new node to the cluster.
1764 a8083063 Iustin Pop

1765 a8083063 Iustin Pop
    """
1766 a8083063 Iustin Pop
    new_node = self.new_node
1767 a8083063 Iustin Pop
    node = new_node.name
1768 a8083063 Iustin Pop
1769 a8083063 Iustin Pop
    # set up inter-node password and certificate and restart the node daemon
1770 a8083063 Iustin Pop
    gntpass = self.sstore.GetNodeDaemonPassword()
1771 a8083063 Iustin Pop
    if not re.match('^[a-zA-Z0-9.]{1,64}$', gntpass):
1772 3ecf6786 Iustin Pop
      raise errors.OpExecError("ganeti password corruption detected")
1773 a8083063 Iustin Pop
    f = open(constants.SSL_CERT_FILE)
1774 a8083063 Iustin Pop
    try:
1775 a8083063 Iustin Pop
      gntpem = f.read(8192)
1776 a8083063 Iustin Pop
    finally:
1777 a8083063 Iustin Pop
      f.close()
1778 a8083063 Iustin Pop
    # in the base64 pem encoding, neither '!' nor '.' are valid chars,
1779 a8083063 Iustin Pop
    # so we use this to detect an invalid certificate; as long as the
1780 a8083063 Iustin Pop
    # cert doesn't contain this, the here-document will be correctly
1781 a8083063 Iustin Pop
    # parsed by the shell sequence below
1782 a8083063 Iustin Pop
    if re.search('^!EOF\.', gntpem, re.MULTILINE):
1783 3ecf6786 Iustin Pop
      raise errors.OpExecError("invalid PEM encoding in the SSL certificate")
1784 a8083063 Iustin Pop
    if not gntpem.endswith("\n"):
1785 3ecf6786 Iustin Pop
      raise errors.OpExecError("PEM must end with newline")
1786 a8083063 Iustin Pop
    logger.Info("copy cluster pass to %s and starting the node daemon" % node)
1787 a8083063 Iustin Pop
1788 a8083063 Iustin Pop
    # and then connect with ssh to set password and start ganeti-noded
1789 a8083063 Iustin Pop
    # note that all the below variables are sanitized at this point,
1790 a8083063 Iustin Pop
    # either by being constants or by the checks above
1791 a8083063 Iustin Pop
    ss = self.sstore
1792 a8083063 Iustin Pop
    mycommand = ("umask 077 && "
1793 a8083063 Iustin Pop
                 "echo '%s' > '%s' && "
1794 a8083063 Iustin Pop
                 "cat > '%s' << '!EOF.' && \n"
1795 a8083063 Iustin Pop
                 "%s!EOF.\n%s restart" %
1796 a8083063 Iustin Pop
                 (gntpass, ss.KeyToFilename(ss.SS_NODED_PASS),
1797 a8083063 Iustin Pop
                  constants.SSL_CERT_FILE, gntpem,
1798 a8083063 Iustin Pop
                  constants.NODE_INITD_SCRIPT))
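    # With illustrative values filled in, the generated remote command is
    # roughly (placeholder names below are only for this sketch; the real
    # values come from the sstore keys and the constants module):
    #   umask 077 &&
    #   echo '<node password>' > '<ssconf node-pass file>' &&
    #   cat > '<SSL certificate file>' << '!EOF.' &&
    #   <PEM data>!EOF.
    #   <node init script> restart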
1799 a8083063 Iustin Pop
1800 c92b310a Michael Hanselmann
    result = self.ssh.Run(node, 'root', mycommand, batch=False, ask_key=True)
1801 a8083063 Iustin Pop
    if result.failed:
1802 3ecf6786 Iustin Pop
      raise errors.OpExecError("Remote command on node %s, error: %s,"
1803 3ecf6786 Iustin Pop
                               " output: %s" %
1804 3ecf6786 Iustin Pop
                               (node, result.fail_reason, result.output))
1805 a8083063 Iustin Pop
1806 a8083063 Iustin Pop
    # check connectivity
1807 a8083063 Iustin Pop
    time.sleep(4)
1808 a8083063 Iustin Pop
1809 a8083063 Iustin Pop
    result = rpc.call_version([node])[node]
1810 a8083063 Iustin Pop
    if result:
1811 a8083063 Iustin Pop
      if constants.PROTOCOL_VERSION == result:
1812 a8083063 Iustin Pop
        logger.Info("communication to node %s fine, sw version %s match" %
1813 a8083063 Iustin Pop
                    (node, result))
1814 a8083063 Iustin Pop
      else:
1815 3ecf6786 Iustin Pop
        raise errors.OpExecError("Version mismatch master version %s,"
1816 3ecf6786 Iustin Pop
                                 " node version %s" %
1817 3ecf6786 Iustin Pop
                                 (constants.PROTOCOL_VERSION, result))
1818 a8083063 Iustin Pop
    else:
1819 3ecf6786 Iustin Pop
      raise errors.OpExecError("Cannot get version from the new node")
1820 a8083063 Iustin Pop
1821 a8083063 Iustin Pop
    # setup ssh on node
1822 a8083063 Iustin Pop
    logger.Info("copy ssh key to node %s" % node)
1823 70d9e3d8 Iustin Pop
    priv_key, pub_key, _ = ssh.GetUserFiles(constants.GANETI_RUNAS)
1824 a8083063 Iustin Pop
    keyarray = []
1825 70d9e3d8 Iustin Pop
    keyfiles = [constants.SSH_HOST_DSA_PRIV, constants.SSH_HOST_DSA_PUB,
1826 70d9e3d8 Iustin Pop
                constants.SSH_HOST_RSA_PRIV, constants.SSH_HOST_RSA_PUB,
1827 70d9e3d8 Iustin Pop
                priv_key, pub_key]
1828 a8083063 Iustin Pop
1829 a8083063 Iustin Pop
    for i in keyfiles:
1830 a8083063 Iustin Pop
      f = open(i, 'r')
1831 a8083063 Iustin Pop
      try:
1832 a8083063 Iustin Pop
        keyarray.append(f.read())
1833 a8083063 Iustin Pop
      finally:
1834 a8083063 Iustin Pop
        f.close()
1835 a8083063 Iustin Pop
1836 a8083063 Iustin Pop
    result = rpc.call_node_add(node, keyarray[0], keyarray[1], keyarray[2],
1837 a8083063 Iustin Pop
                               keyarray[3], keyarray[4], keyarray[5])
1838 a8083063 Iustin Pop
1839 a8083063 Iustin Pop
    if not result:
1840 3ecf6786 Iustin Pop
      raise errors.OpExecError("Cannot transfer ssh keys to the new node")
1841 a8083063 Iustin Pop
1842 a8083063 Iustin Pop
    # Add node to our /etc/hosts, and add key to known_hosts
1843 9440aeab Michael Hanselmann
    _AddHostToEtcHosts(new_node.name)
1844 c8a0948f Michael Hanselmann
1845 a8083063 Iustin Pop
    if new_node.secondary_ip != new_node.primary_ip:
1846 16abfbc2 Alexander Schreiber
      if not rpc.call_node_tcp_ping(new_node.name,
1847 16abfbc2 Alexander Schreiber
                                    constants.LOCALHOST_IP_ADDRESS,
1848 16abfbc2 Alexander Schreiber
                                    new_node.secondary_ip,
1849 16abfbc2 Alexander Schreiber
                                    constants.DEFAULT_NODED_PORT,
1850 16abfbc2 Alexander Schreiber
                                    10, False):
1851 f4bc1f2c Michael Hanselmann
        raise errors.OpExecError("Node claims it doesn't have the secondary ip"
1852 f4bc1f2c Michael Hanselmann
                                 " you gave (%s). Please fix and re-run this"
1853 f4bc1f2c Michael Hanselmann
                                 " command." % new_node.secondary_ip)
1854 a8083063 Iustin Pop
1855 c92b310a Michael Hanselmann
    success, msg = self.ssh.VerifyNodeHostname(node)
1856 ff98055b Iustin Pop
    if not success:
1857 ff98055b Iustin Pop
      raise errors.OpExecError("Node '%s' claims it has a different hostname"
1858 f4bc1f2c Michael Hanselmann
                               " than the one the resolver gives: %s."
1859 f4bc1f2c Michael Hanselmann
                               " Please fix and re-run this command." %
1860 ff98055b Iustin Pop
                               (node, msg))
1861 ff98055b Iustin Pop
1862 a8083063 Iustin Pop
    # Distribute updated /etc/hosts and known_hosts to all nodes,
1863 a8083063 Iustin Pop
    # including the node just added
1864 880478f8 Iustin Pop
    myself = self.cfg.GetNodeInfo(self.sstore.GetMasterNode())
1865 a8083063 Iustin Pop
    dist_nodes = self.cfg.GetNodeList() + [node]
1866 a8083063 Iustin Pop
    if myself.name in dist_nodes:
1867 a8083063 Iustin Pop
      dist_nodes.remove(myself.name)
1868 a8083063 Iustin Pop
1869 a8083063 Iustin Pop
    logger.Debug("Copying hosts and known_hosts to all nodes")
1870 107711b0 Michael Hanselmann
    for fname in (constants.ETC_HOSTS, constants.SSH_KNOWN_HOSTS_FILE):
1871 a8083063 Iustin Pop
      result = rpc.call_upload_file(dist_nodes, fname)
1872 a8083063 Iustin Pop
      for to_node in dist_nodes:
1873 a8083063 Iustin Pop
        if not result[to_node]:
1874 a8083063 Iustin Pop
          logger.Error("copy of file %s to node %s failed" %
1875 a8083063 Iustin Pop
                       (fname, to_node))
1876 a8083063 Iustin Pop
1877 cb91d46e Iustin Pop
    to_copy = ss.GetFileList()
1878 2a6469d5 Alexander Schreiber
    if self.sstore.GetHypervisorType() == constants.HT_XEN_HVM31:
1879 2a6469d5 Alexander Schreiber
      to_copy.append(constants.VNC_PASSWORD_FILE)
1880 a8083063 Iustin Pop
    for fname in to_copy:
1881 c92b310a Michael Hanselmann
      if not self.ssh.CopyFileToNode(node, fname):
1882 a8083063 Iustin Pop
        logger.Error("could not copy file %s to node %s" % (fname, node))
1883 a8083063 Iustin Pop
1884 e7c6e02b Michael Hanselmann
    if not self.op.readd:
1885 e7c6e02b Michael Hanselmann
      logger.Info("adding node %s to cluster.conf" % node)
1886 e7c6e02b Michael Hanselmann
      self.cfg.AddNode(new_node)
1887 a8083063 Iustin Pop
1888 a8083063 Iustin Pop
1889 a8083063 Iustin Pop
class LUMasterFailover(LogicalUnit):
1890 a8083063 Iustin Pop
  """Failover the master node to the current node.
1891 a8083063 Iustin Pop

1892 a8083063 Iustin Pop
  This is a special LU in that it must run on a non-master node.
1893 a8083063 Iustin Pop

1894 a8083063 Iustin Pop
  """
1895 a8083063 Iustin Pop
  HPATH = "master-failover"
1896 a8083063 Iustin Pop
  HTYPE = constants.HTYPE_CLUSTER
1897 a8083063 Iustin Pop
  REQ_MASTER = False
1898 a8083063 Iustin Pop
  _OP_REQP = []
1899 a8083063 Iustin Pop
1900 a8083063 Iustin Pop
  def BuildHooksEnv(self):
1901 a8083063 Iustin Pop
    """Build hooks env.
1902 a8083063 Iustin Pop

1903 a8083063 Iustin Pop
    This will run on the new master only in the pre phase, and on all
1904 a8083063 Iustin Pop
    the nodes in the post phase.
1905 a8083063 Iustin Pop

1906 a8083063 Iustin Pop
    """
1907 a8083063 Iustin Pop
    env = {
1908 0e137c28 Iustin Pop
      "OP_TARGET": self.new_master,
1909 a8083063 Iustin Pop
      "NEW_MASTER": self.new_master,
1910 a8083063 Iustin Pop
      "OLD_MASTER": self.old_master,
1911 a8083063 Iustin Pop
      }
1912 a8083063 Iustin Pop
    return env, [self.new_master], self.cfg.GetNodeList()
1913 a8083063 Iustin Pop
1914 a8083063 Iustin Pop
  def CheckPrereq(self):
1915 a8083063 Iustin Pop
    """Check prerequisites.
1916 a8083063 Iustin Pop

1917 a8083063 Iustin Pop
    This checks that we are not already the master.
1918 a8083063 Iustin Pop

1919 a8083063 Iustin Pop
    """
1920 89e1fc26 Iustin Pop
    self.new_master = utils.HostInfo().name
1921 880478f8 Iustin Pop
    self.old_master = self.sstore.GetMasterNode()
1922 a8083063 Iustin Pop
1923 a8083063 Iustin Pop
    if self.old_master == self.new_master:
1924 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("This commands must be run on the node"
1925 f4bc1f2c Michael Hanselmann
                                 " where you want the new master to be."
1926 f4bc1f2c Michael Hanselmann
                                 " %s is already the master" %
1927 3ecf6786 Iustin Pop
                                 self.old_master)
1928 a8083063 Iustin Pop
1929 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
1930 a8083063 Iustin Pop
    """Failover the master node.
1931 a8083063 Iustin Pop

1932 a8083063 Iustin Pop
    This command, when run on a non-master node, will cause the current
1933 a8083063 Iustin Pop
    master to cease being master, and the non-master to become new
1934 a8083063 Iustin Pop
    master.
1935 a8083063 Iustin Pop

1936 a8083063 Iustin Pop
    """
1937 a8083063 Iustin Pop
    #TODO: do not rely on gethostname returning the FQDN
1938 a8083063 Iustin Pop
    logger.Info("setting master to %s, old master: %s" %
1939 a8083063 Iustin Pop
                (self.new_master, self.old_master))
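    # the failover proceeds in three steps: stop the master role on the
    # old master, rewrite and redistribute the ss_master_node file, then
    # start the master role (and the master IP) on this node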
1940 a8083063 Iustin Pop
1941 a8083063 Iustin Pop
    if not rpc.call_node_stop_master(self.old_master):
1942 a8083063 Iustin Pop
      logger.Error("could disable the master role on the old master"
1943 a8083063 Iustin Pop
                   " %s, please disable manually" % self.old_master)
1944 a8083063 Iustin Pop
1945 880478f8 Iustin Pop
    ss = self.sstore
1946 880478f8 Iustin Pop
    ss.SetKey(ss.SS_MASTER_NODE, self.new_master)
1947 880478f8 Iustin Pop
    if not rpc.call_upload_file(self.cfg.GetNodeList(),
1948 880478f8 Iustin Pop
                                ss.KeyToFilename(ss.SS_MASTER_NODE)):
1949 880478f8 Iustin Pop
      logger.Error("could not distribute the new simple store master file"
1950 880478f8 Iustin Pop
                   " to the other nodes, please check.")
1951 880478f8 Iustin Pop
1952 a8083063 Iustin Pop
    if not rpc.call_node_start_master(self.new_master):
1953 a8083063 Iustin Pop
      logger.Error("could not start the master role on the new master"
1954 a8083063 Iustin Pop
                   " %s, please check" % self.new_master)
1955 f4bc1f2c Michael Hanselmann
      feedback_fn("Error in activating the master IP on the new master,"
1956 f4bc1f2c Michael Hanselmann
                  " please fix manually.")
1957 a8083063 Iustin Pop
1958 a8083063 Iustin Pop
1959 a8083063 Iustin Pop
1960 a8083063 Iustin Pop
class LUQueryClusterInfo(NoHooksLU):
1961 a8083063 Iustin Pop
  """Query cluster configuration.
1962 a8083063 Iustin Pop

1963 a8083063 Iustin Pop
  """
1964 a8083063 Iustin Pop
  _OP_REQP = []
1965 59322403 Iustin Pop
  REQ_MASTER = False
1966 a8083063 Iustin Pop
1967 a8083063 Iustin Pop
  def CheckPrereq(self):
1968 a8083063 Iustin Pop
    """No prerequsites needed for this LU.
1969 a8083063 Iustin Pop

1970 a8083063 Iustin Pop
    """
1971 a8083063 Iustin Pop
    pass
1972 a8083063 Iustin Pop
1973 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
1974 a8083063 Iustin Pop
    """Return cluster config.
1975 a8083063 Iustin Pop

1976 a8083063 Iustin Pop
    """
1977 a8083063 Iustin Pop
    result = {
1978 5fcdc80d Iustin Pop
      "name": self.sstore.GetClusterName(),
1979 a8083063 Iustin Pop
      "software_version": constants.RELEASE_VERSION,
1980 a8083063 Iustin Pop
      "protocol_version": constants.PROTOCOL_VERSION,
1981 a8083063 Iustin Pop
      "config_version": constants.CONFIG_VERSION,
1982 a8083063 Iustin Pop
      "os_api_version": constants.OS_API_VERSION,
1983 a8083063 Iustin Pop
      "export_version": constants.EXPORT_VERSION,
1984 880478f8 Iustin Pop
      "master": self.sstore.GetMasterNode(),
1985 a8083063 Iustin Pop
      "architecture": (platform.architecture()[0], platform.machine()),
1986 a8083063 Iustin Pop
      }
1987 a8083063 Iustin Pop
1988 a8083063 Iustin Pop
    return result
1989 a8083063 Iustin Pop
1990 a8083063 Iustin Pop
1991 a8083063 Iustin Pop
class LUClusterCopyFile(NoHooksLU):
1992 a8083063 Iustin Pop
  """Copy file to cluster.
1993 a8083063 Iustin Pop

1994 a8083063 Iustin Pop
  """
1995 a8083063 Iustin Pop
  _OP_REQP = ["nodes", "filename"]
1996 a8083063 Iustin Pop
1997 a8083063 Iustin Pop
  def CheckPrereq(self):
1998 a8083063 Iustin Pop
    """Check prerequisites.
1999 a8083063 Iustin Pop

2000 a8083063 Iustin Pop
    It should check that the named file exists and that the given list
2001 a8083063 Iustin Pop
    of nodes is valid.
2002 a8083063 Iustin Pop

2003 a8083063 Iustin Pop
    """
2004 a8083063 Iustin Pop
    if not os.path.exists(self.op.filename):
2005 a8083063 Iustin Pop
      raise errors.OpPrereqError("No such filename '%s'" % self.op.filename)
2006 dcb93971 Michael Hanselmann
2007 dcb93971 Michael Hanselmann
    self.nodes = _GetWantedNodes(self, self.op.nodes)
2008 a8083063 Iustin Pop
2009 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
2010 a8083063 Iustin Pop
    """Copy a file from master to some nodes.
2011 a8083063 Iustin Pop

2012 a8083063 Iustin Pop
    Args:
2013 a8083063 Iustin Pop
      self.op.filename - the name of the file to copy
2014 a8083063 Iustin Pop
      self.nodes - the list of target node names, as computed in
2015 a8083063 Iustin Pop
                   CheckPrereq; the master node itself is skipped
2017 a8083063 Iustin Pop

2018 a8083063 Iustin Pop
    """
2019 a8083063 Iustin Pop
    filename = self.op.filename
2020 a8083063 Iustin Pop
2021 89e1fc26 Iustin Pop
    myname = utils.HostInfo().name
2022 a8083063 Iustin Pop
2023 a7ba5e53 Iustin Pop
    for node in self.nodes:
2024 a8083063 Iustin Pop
      if node == myname:
2025 a8083063 Iustin Pop
        continue
2026 c92b310a Michael Hanselmann
      if not self.ssh.CopyFileToNode(node, filename):
2027 a8083063 Iustin Pop
        logger.Error("Copy of file %s to node %s failed" % (filename, node))
2028 a8083063 Iustin Pop
2029 a8083063 Iustin Pop
2030 a8083063 Iustin Pop
class LUDumpClusterConfig(NoHooksLU):
2031 a8083063 Iustin Pop
  """Return a text-representation of the cluster-config.
2032 a8083063 Iustin Pop

2033 a8083063 Iustin Pop
  """
2034 a8083063 Iustin Pop
  _OP_REQP = []
2035 a8083063 Iustin Pop
2036 a8083063 Iustin Pop
  def CheckPrereq(self):
2037 a8083063 Iustin Pop
    """No prerequisites.
2038 a8083063 Iustin Pop

2039 a8083063 Iustin Pop
    """
2040 a8083063 Iustin Pop
    pass
2041 a8083063 Iustin Pop
2042 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
2043 a8083063 Iustin Pop
    """Dump a representation of the cluster config to the standard output.
2044 a8083063 Iustin Pop

2045 a8083063 Iustin Pop
    """
2046 a8083063 Iustin Pop
    return self.cfg.DumpConfig()
2047 a8083063 Iustin Pop
2048 a8083063 Iustin Pop
2049 a8083063 Iustin Pop
class LURunClusterCommand(NoHooksLU):
2050 a8083063 Iustin Pop
  """Run a command on some nodes.
2051 a8083063 Iustin Pop

2052 a8083063 Iustin Pop
  """
2053 a8083063 Iustin Pop
  _OP_REQP = ["command", "nodes"]
2054 a8083063 Iustin Pop
2055 a8083063 Iustin Pop
  def CheckPrereq(self):
2056 a8083063 Iustin Pop
    """Check prerequisites.
2057 a8083063 Iustin Pop

2058 a8083063 Iustin Pop
    It checks that the given list of nodes is valid.
2059 a8083063 Iustin Pop

2060 a8083063 Iustin Pop
    """
2061 dcb93971 Michael Hanselmann
    self.nodes = _GetWantedNodes(self, self.op.nodes)
2062 a8083063 Iustin Pop
2063 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
2064 a8083063 Iustin Pop
    """Run a command on some nodes.
2065 a8083063 Iustin Pop

2066 a8083063 Iustin Pop
    """
2067 5f83e263 Iustin Pop
    # put the master at the end of the nodes list
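    # (presumably so that a command which disrupts the node running it,
    # e.g. one stopping the ganeti daemons, reaches the master only after
    # every other node has already executed it)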
2068 5f83e263 Iustin Pop
    master_node = self.sstore.GetMasterNode()
2069 5f83e263 Iustin Pop
    if master_node in self.nodes:
2070 5f83e263 Iustin Pop
      self.nodes.remove(master_node)
2071 5f83e263 Iustin Pop
      self.nodes.append(master_node)
2072 5f83e263 Iustin Pop
2073 a8083063 Iustin Pop
    data = []
2074 a8083063 Iustin Pop
    for node in self.nodes:
2075 c92b310a Michael Hanselmann
      result = self.ssh.Run(node, "root", self.op.command)
2076 a7ba5e53 Iustin Pop
      data.append((node, result.output, result.exit_code))
2077 a8083063 Iustin Pop
2078 a8083063 Iustin Pop
    return data
2079 a8083063 Iustin Pop
2080 a8083063 Iustin Pop
2081 a8083063 Iustin Pop
class LUActivateInstanceDisks(NoHooksLU):
2082 a8083063 Iustin Pop
  """Bring up an instance's disks.
2083 a8083063 Iustin Pop

2084 a8083063 Iustin Pop
  """
2085 a8083063 Iustin Pop
  _OP_REQP = ["instance_name"]
2086 a8083063 Iustin Pop
2087 a8083063 Iustin Pop
  def CheckPrereq(self):
2088 a8083063 Iustin Pop
    """Check prerequisites.
2089 a8083063 Iustin Pop

2090 a8083063 Iustin Pop
    This checks that the instance is in the cluster.
2091 a8083063 Iustin Pop

2092 a8083063 Iustin Pop
    """
2093 a8083063 Iustin Pop
    instance = self.cfg.GetInstanceInfo(
2094 a8083063 Iustin Pop
      self.cfg.ExpandInstanceName(self.op.instance_name))
2095 a8083063 Iustin Pop
    if instance is None:
2096 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("Instance '%s' not known" %
2097 3ecf6786 Iustin Pop
                                 self.op.instance_name)
2098 a8083063 Iustin Pop
    self.instance = instance
2099 a8083063 Iustin Pop
2100 a8083063 Iustin Pop
2101 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
2102 a8083063 Iustin Pop
    """Activate the disks.
2103 a8083063 Iustin Pop

2104 a8083063 Iustin Pop
    """
2105 a8083063 Iustin Pop
    disks_ok, disks_info = _AssembleInstanceDisks(self.instance, self.cfg)
2106 a8083063 Iustin Pop
    if not disks_ok:
2107 3ecf6786 Iustin Pop
      raise errors.OpExecError("Cannot activate block devices")
2108 a8083063 Iustin Pop
2109 a8083063 Iustin Pop
    return disks_info
2110 a8083063 Iustin Pop
2111 a8083063 Iustin Pop
2112 a8083063 Iustin Pop
def _AssembleInstanceDisks(instance, cfg, ignore_secondaries=False):
2113 a8083063 Iustin Pop
  """Prepare the block devices for an instance.
2114 a8083063 Iustin Pop

2115 a8083063 Iustin Pop
  This sets up the block devices on all nodes.
2116 a8083063 Iustin Pop

2117 a8083063 Iustin Pop
  Args:
2118 a8083063 Iustin Pop
    instance: a ganeti.objects.Instance object
2119 a8083063 Iustin Pop
    ignore_secondaries: if true, errors on secondary nodes won't result
2120 a8083063 Iustin Pop
                        in an error return from the function
2121 a8083063 Iustin Pop

2122 a8083063 Iustin Pop
  Returns:
2123 a8083063 Iustin Pop
    a tuple (disks_ok, device_info): disks_ok is False if the operation
2124 a8083063 Iustin Pop
    failed, and device_info is the list of (host, instance_visible_name,
2125 a8083063 Iustin Pop
    node_visible_name) mappings from node devices to instance devices
2126 a8083063 Iustin Pop
  """
2127 a8083063 Iustin Pop
  device_info = []
2128 a8083063 Iustin Pop
  disks_ok = True
2129 fdbd668d Iustin Pop
  iname = instance.name
2130 fdbd668d Iustin Pop
  # With the two-pass mechanism we try to reduce the window of
2131 fdbd668d Iustin Pop
  # opportunity for the race condition of switching DRBD to primary
2132 fdbd668d Iustin Pop
  # before the handshake has occurred, but we do not eliminate it
2133 fdbd668d Iustin Pop
2134 fdbd668d Iustin Pop
  # The proper fix would be to wait (with some limits) until the
2135 fdbd668d Iustin Pop
  # connection has been made and drbd transitions from WFConnection
2136 fdbd668d Iustin Pop
  # into any other network-connected state (Connected, SyncTarget,
2137 fdbd668d Iustin Pop
  # SyncSource, etc.)
2138 fdbd668d Iustin Pop
2139 fdbd668d Iustin Pop
  # 1st pass, assemble on all nodes in secondary mode
2140 a8083063 Iustin Pop
  for inst_disk in instance.disks:
2141 a8083063 Iustin Pop
    for node, node_disk in inst_disk.ComputeNodeTree(instance.primary_node):
2142 a8083063 Iustin Pop
      cfg.SetDiskID(node_disk, node)
2143 fdbd668d Iustin Pop
      result = rpc.call_blockdev_assemble(node, node_disk, iname, False)
2144 a8083063 Iustin Pop
      if not result:
2145 f4bc1f2c Michael Hanselmann
        logger.Error("could not prepare block device %s on node %s"
2146 fdbd668d Iustin Pop
                     " (is_primary=False, pass=1)" % (inst_disk.iv_name, node))
2147 fdbd668d Iustin Pop
        if not ignore_secondaries:
2148 a8083063 Iustin Pop
          disks_ok = False
2149 fdbd668d Iustin Pop
2150 fdbd668d Iustin Pop
  # FIXME: race condition on drbd migration to primary
2151 fdbd668d Iustin Pop
2152 fdbd668d Iustin Pop
  # 2nd pass, do only the primary node
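  # (the primary node already holds a secondary-mode assembly from the
  # first pass; re-assembling with is_primary=True promotes the device)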
2153 fdbd668d Iustin Pop
  for inst_disk in instance.disks:
2154 fdbd668d Iustin Pop
    for node, node_disk in inst_disk.ComputeNodeTree(instance.primary_node):
2155 fdbd668d Iustin Pop
      if node != instance.primary_node:
2156 fdbd668d Iustin Pop
        continue
2157 fdbd668d Iustin Pop
      cfg.SetDiskID(node_disk, node)
2158 fdbd668d Iustin Pop
      result = rpc.call_blockdev_assemble(node, node_disk, iname, True)
2159 fdbd668d Iustin Pop
      if not result:
2160 fdbd668d Iustin Pop
        logger.Error("could not prepare block device %s on node %s"
2161 fdbd668d Iustin Pop
                     " (is_primary=True, pass=2)" % (inst_disk.iv_name, node))
2162 fdbd668d Iustin Pop
        disks_ok = False
2163 fdbd668d Iustin Pop
    device_info.append((instance.primary_node, inst_disk.iv_name, result))
2164 a8083063 Iustin Pop
2165 b352ab5b Iustin Pop
  # leave the disks configured for the primary node
2166 b352ab5b Iustin Pop
  # this is a workaround that would be fixed better by
2167 b352ab5b Iustin Pop
  # improving the logical/physical id handling
2168 b352ab5b Iustin Pop
  for disk in instance.disks:
2169 b352ab5b Iustin Pop
    cfg.SetDiskID(disk, instance.primary_node)
2170 b352ab5b Iustin Pop
2171 a8083063 Iustin Pop
  return disks_ok, device_info
2172 a8083063 Iustin Pop
2173 a8083063 Iustin Pop
2174 fe7b0351 Michael Hanselmann
def _StartInstanceDisks(cfg, instance, force):
2175 3ecf6786 Iustin Pop
  """Start the disks of an instance.
2176 3ecf6786 Iustin Pop

2177 3ecf6786 Iustin Pop
  """
2178 fe7b0351 Michael Hanselmann
  disks_ok, dummy = _AssembleInstanceDisks(instance, cfg,
2179 fe7b0351 Michael Hanselmann
                                           ignore_secondaries=force)
2180 fe7b0351 Michael Hanselmann
  if not disks_ok:
2181 fe7b0351 Michael Hanselmann
    _ShutdownInstanceDisks(instance, cfg)
2182 fe7b0351 Michael Hanselmann
    if force is not None and not force:
2183 fe7b0351 Michael Hanselmann
      logger.Error("If the message above refers to a secondary node,"
2184 fe7b0351 Michael Hanselmann
                   " you can retry the operation using '--force'.")
2185 3ecf6786 Iustin Pop
    raise errors.OpExecError("Disk consistency error")
2186 fe7b0351 Michael Hanselmann
2187 fe7b0351 Michael Hanselmann
2188 a8083063 Iustin Pop
class LUDeactivateInstanceDisks(NoHooksLU):
2189 a8083063 Iustin Pop
  """Shutdown an instance's disks.
2190 a8083063 Iustin Pop

2191 a8083063 Iustin Pop
  """
2192 a8083063 Iustin Pop
  _OP_REQP = ["instance_name"]
2193 a8083063 Iustin Pop
2194 a8083063 Iustin Pop
  def CheckPrereq(self):
2195 a8083063 Iustin Pop
    """Check prerequisites.
2196 a8083063 Iustin Pop

2197 a8083063 Iustin Pop
    This checks that the instance is in the cluster.
2198 a8083063 Iustin Pop

2199 a8083063 Iustin Pop
    """
2200 a8083063 Iustin Pop
    instance = self.cfg.GetInstanceInfo(
2201 a8083063 Iustin Pop
      self.cfg.ExpandInstanceName(self.op.instance_name))
2202 a8083063 Iustin Pop
    if instance is None:
2203 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("Instance '%s' not known" %
2204 3ecf6786 Iustin Pop
                                 self.op.instance_name)
2205 a8083063 Iustin Pop
    self.instance = instance
2206 a8083063 Iustin Pop
2207 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
2208 a8083063 Iustin Pop
    """Deactivate the disks
2209 a8083063 Iustin Pop

2210 a8083063 Iustin Pop
    """
2211 a8083063 Iustin Pop
    instance = self.instance
2212 a8083063 Iustin Pop
    ins_l = rpc.call_instance_list([instance.primary_node])
2213 a8083063 Iustin Pop
    ins_l = ins_l[instance.primary_node]
2214 a8083063 Iustin Pop
    if not type(ins_l) is list:
2215 3ecf6786 Iustin Pop
      raise errors.OpExecError("Can't contact node '%s'" %
2216 3ecf6786 Iustin Pop
                               instance.primary_node)
2217 a8083063 Iustin Pop
2218 a8083063 Iustin Pop
    if self.instance.name in ins_l:
2219 3ecf6786 Iustin Pop
      raise errors.OpExecError("Instance is running, can't shutdown"
2220 3ecf6786 Iustin Pop
                               " block devices.")
2221 a8083063 Iustin Pop
2222 a8083063 Iustin Pop
    _ShutdownInstanceDisks(instance, self.cfg)
2223 a8083063 Iustin Pop
2224 a8083063 Iustin Pop
2225 a8083063 Iustin Pop
def _ShutdownInstanceDisks(instance, cfg, ignore_primary=False):
2226 a8083063 Iustin Pop
  """Shutdown block devices of an instance.
2227 a8083063 Iustin Pop

2228 a8083063 Iustin Pop
  This does the shutdown on all nodes of the instance.
2229 a8083063 Iustin Pop

2230 a8083063 Iustin Pop
  Errors on the secondary nodes always make this function return False;
2231 a8083063 Iustin Pop
  errors on the primary node are ignored only if ignore_primary is true.
2232 a8083063 Iustin Pop

2233 a8083063 Iustin Pop
  """
2234 a8083063 Iustin Pop
  result = True
2235 a8083063 Iustin Pop
  for disk in instance.disks:
2236 a8083063 Iustin Pop
    for node, top_disk in disk.ComputeNodeTree(instance.primary_node):
2237 a8083063 Iustin Pop
      cfg.SetDiskID(top_disk, node)
2238 a8083063 Iustin Pop
      if not rpc.call_blockdev_shutdown(node, top_disk):
2239 a8083063 Iustin Pop
        logger.Error("could not shutdown block device %s on node %s" %
2240 a8083063 Iustin Pop
                     (disk.iv_name, node))
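        # a shutdown failure only counts against the result if it happened
        # on a secondary node, or on the primary node without ignore_primary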
2241 a8083063 Iustin Pop
        if not ignore_primary or node != instance.primary_node:
2242 a8083063 Iustin Pop
          result = False
2243 a8083063 Iustin Pop
  return result
2244 a8083063 Iustin Pop
2245 a8083063 Iustin Pop
2246 d4f16fd9 Iustin Pop
def _CheckNodeFreeMemory(cfg, node, reason, requested):
2247 d4f16fd9 Iustin Pop
  """Checks if a node has enough free memory.
2248 d4f16fd9 Iustin Pop

2249 d4f16fd9 Iustin Pop
  This function checks if a given node has the needed amount of free
2250 d4f16fd9 Iustin Pop
  memory. In case the node has less memory or we cannot get the
2251 d4f16fd9 Iustin Pop
  information from the node, this function raises an OpPrereqError
2252 d4f16fd9 Iustin Pop
  exception.
2253 d4f16fd9 Iustin Pop

2254 d4f16fd9 Iustin Pop
  Args:
2255 d4f16fd9 Iustin Pop
    - cfg: a ConfigWriter instance
2256 d4f16fd9 Iustin Pop
    - node: the node name
2257 d4f16fd9 Iustin Pop
    - reason: string to use in the error message
2258 d4f16fd9 Iustin Pop
    - requested: the amount of memory in MiB
2259 d4f16fd9 Iustin Pop

2260 d4f16fd9 Iustin Pop
  """
2261 d4f16fd9 Iustin Pop
  nodeinfo = rpc.call_node_info([node], cfg.GetVGName())
2262 d4f16fd9 Iustin Pop
  if not nodeinfo or not isinstance(nodeinfo, dict):
2263 d4f16fd9 Iustin Pop
    raise errors.OpPrereqError("Could not contact node %s for resource"
2264 d4f16fd9 Iustin Pop
                             " information" % (node,))
2265 d4f16fd9 Iustin Pop
2266 d4f16fd9 Iustin Pop
  free_mem = nodeinfo[node].get('memory_free')
2267 d4f16fd9 Iustin Pop
  if not isinstance(free_mem, int):
2268 d4f16fd9 Iustin Pop
    raise errors.OpPrereqError("Can't compute free memory on node %s, result"
2269 d4f16fd9 Iustin Pop
                             " was '%s'" % (node, free_mem))
2270 d4f16fd9 Iustin Pop
  if requested > free_mem:
2271 d4f16fd9 Iustin Pop
    raise errors.OpPrereqError("Not enough memory on node %s for %s:"
2272 d4f16fd9 Iustin Pop
                             " needed %s MiB, available %s MiB" %
2273 d4f16fd9 Iustin Pop
                             (node, reason, requested, free_mem))
2274 d4f16fd9 Iustin Pop
2275 d4f16fd9 Iustin Pop
2276 a8083063 Iustin Pop
class LUStartupInstance(LogicalUnit):
2277 a8083063 Iustin Pop
  """Starts an instance.
2278 a8083063 Iustin Pop

2279 a8083063 Iustin Pop
  """
2280 a8083063 Iustin Pop
  HPATH = "instance-start"
2281 a8083063 Iustin Pop
  HTYPE = constants.HTYPE_INSTANCE
2282 a8083063 Iustin Pop
  _OP_REQP = ["instance_name", "force"]
2283 a8083063 Iustin Pop
2284 a8083063 Iustin Pop
  def BuildHooksEnv(self):
2285 a8083063 Iustin Pop
    """Build hooks env.
2286 a8083063 Iustin Pop

2287 a8083063 Iustin Pop
    This runs on master, primary and secondary nodes of the instance.
2288 a8083063 Iustin Pop

2289 a8083063 Iustin Pop
    """
2290 a8083063 Iustin Pop
    env = {
2291 a8083063 Iustin Pop
      "FORCE": self.op.force,
2292 a8083063 Iustin Pop
      }
2293 396e1b78 Michael Hanselmann
    env.update(_BuildInstanceHookEnvByObject(self.instance))
2294 880478f8 Iustin Pop
    nl = ([self.sstore.GetMasterNode(), self.instance.primary_node] +
2295 a8083063 Iustin Pop
          list(self.instance.secondary_nodes))
2296 a8083063 Iustin Pop
    return env, nl, nl
2297 a8083063 Iustin Pop
2298 a8083063 Iustin Pop
  def CheckPrereq(self):
2299 a8083063 Iustin Pop
    """Check prerequisites.
2300 a8083063 Iustin Pop

2301 a8083063 Iustin Pop
    This checks that the instance is in the cluster.
2302 a8083063 Iustin Pop

2303 a8083063 Iustin Pop
    """
2304 a8083063 Iustin Pop
    instance = self.cfg.GetInstanceInfo(
2305 a8083063 Iustin Pop
      self.cfg.ExpandInstanceName(self.op.instance_name))
2306 a8083063 Iustin Pop
    if instance is None:
2307 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("Instance '%s' not known" %
2308 3ecf6786 Iustin Pop
                                 self.op.instance_name)
2309 a8083063 Iustin Pop
2310 a8083063 Iustin Pop
    # check bridges existence
2311 bf6929a2 Alexander Schreiber
    _CheckInstanceBridgesExist(instance)
2312 a8083063 Iustin Pop
2313 d4f16fd9 Iustin Pop
    _CheckNodeFreeMemory(self.cfg, instance.primary_node,
2314 d4f16fd9 Iustin Pop
                         "starting instance %s" % instance.name,
2315 d4f16fd9 Iustin Pop
                         instance.memory)
2316 d4f16fd9 Iustin Pop
2317 a8083063 Iustin Pop
    self.instance = instance
2318 a8083063 Iustin Pop
    self.op.instance_name = instance.name
2319 a8083063 Iustin Pop
2320 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
2321 a8083063 Iustin Pop
    """Start the instance.
2322 a8083063 Iustin Pop

2323 a8083063 Iustin Pop
    """
2324 a8083063 Iustin Pop
    instance = self.instance
2325 a8083063 Iustin Pop
    force = self.op.force
2326 a8083063 Iustin Pop
    extra_args = getattr(self.op, "extra_args", "")
2327 a8083063 Iustin Pop
2328 fe482621 Iustin Pop
    self.cfg.MarkInstanceUp(instance.name)
2329 fe482621 Iustin Pop
2330 a8083063 Iustin Pop
    node_current = instance.primary_node
2331 a8083063 Iustin Pop
2332 fe7b0351 Michael Hanselmann
    _StartInstanceDisks(self.cfg, instance, force)
2333 a8083063 Iustin Pop
2334 a8083063 Iustin Pop
    if not rpc.call_instance_start(node_current, instance, extra_args):
2335 a8083063 Iustin Pop
      _ShutdownInstanceDisks(instance, self.cfg)
2336 3ecf6786 Iustin Pop
      raise errors.OpExecError("Could not start instance")
2337 a8083063 Iustin Pop
2338 a8083063 Iustin Pop
2339 bf6929a2 Alexander Schreiber
class LURebootInstance(LogicalUnit):
2340 bf6929a2 Alexander Schreiber
  """Reboot an instance.
2341 bf6929a2 Alexander Schreiber

2342 bf6929a2 Alexander Schreiber
  """
2343 bf6929a2 Alexander Schreiber
  HPATH = "instance-reboot"
2344 bf6929a2 Alexander Schreiber
  HTYPE = constants.HTYPE_INSTANCE
2345 bf6929a2 Alexander Schreiber
  _OP_REQP = ["instance_name", "ignore_secondaries", "reboot_type"]
2346 bf6929a2 Alexander Schreiber
2347 bf6929a2 Alexander Schreiber
  def BuildHooksEnv(self):
2348 bf6929a2 Alexander Schreiber
    """Build hooks env.
2349 bf6929a2 Alexander Schreiber

2350 bf6929a2 Alexander Schreiber
    This runs on master, primary and secondary nodes of the instance.
2351 bf6929a2 Alexander Schreiber

2352 bf6929a2 Alexander Schreiber
    """
2353 bf6929a2 Alexander Schreiber
    env = {
2354 bf6929a2 Alexander Schreiber
      "IGNORE_SECONDARIES": self.op.ignore_secondaries,
2355 bf6929a2 Alexander Schreiber
      }
2356 bf6929a2 Alexander Schreiber
    env.update(_BuildInstanceHookEnvByObject(self.instance))
2357 bf6929a2 Alexander Schreiber
    nl = ([self.sstore.GetMasterNode(), self.instance.primary_node] +
2358 bf6929a2 Alexander Schreiber
          list(self.instance.secondary_nodes))
2359 bf6929a2 Alexander Schreiber
    return env, nl, nl
2360 bf6929a2 Alexander Schreiber
2361 bf6929a2 Alexander Schreiber
  def CheckPrereq(self):
2362 bf6929a2 Alexander Schreiber
    """Check prerequisites.
2363 bf6929a2 Alexander Schreiber

2364 bf6929a2 Alexander Schreiber
    This checks that the instance is in the cluster.
2365 bf6929a2 Alexander Schreiber

2366 bf6929a2 Alexander Schreiber
    """
2367 bf6929a2 Alexander Schreiber
    instance = self.cfg.GetInstanceInfo(
2368 bf6929a2 Alexander Schreiber
      self.cfg.ExpandInstanceName(self.op.instance_name))
2369 bf6929a2 Alexander Schreiber
    if instance is None:
2370 bf6929a2 Alexander Schreiber
      raise errors.OpPrereqError("Instance '%s' not known" %
2371 bf6929a2 Alexander Schreiber
                                 self.op.instance_name)
2372 bf6929a2 Alexander Schreiber
2373 bf6929a2 Alexander Schreiber
    # check bridges existence
2374 bf6929a2 Alexander Schreiber
    _CheckInstanceBridgesExist(instance)
2375 bf6929a2 Alexander Schreiber
2376 bf6929a2 Alexander Schreiber
    self.instance = instance
2377 bf6929a2 Alexander Schreiber
    self.op.instance_name = instance.name
2378 bf6929a2 Alexander Schreiber
2379 bf6929a2 Alexander Schreiber
  def Exec(self, feedback_fn):
2380 bf6929a2 Alexander Schreiber
    """Reboot the instance.
2381 bf6929a2 Alexander Schreiber

2382 bf6929a2 Alexander Schreiber
    """
2383 bf6929a2 Alexander Schreiber
    instance = self.instance
2384 bf6929a2 Alexander Schreiber
    ignore_secondaries = self.op.ignore_secondaries
2385 bf6929a2 Alexander Schreiber
    reboot_type = self.op.reboot_type
2386 bf6929a2 Alexander Schreiber
    extra_args = getattr(self.op, "extra_args", "")
2387 bf6929a2 Alexander Schreiber
2388 bf6929a2 Alexander Schreiber
    node_current = instance.primary_node
2389 bf6929a2 Alexander Schreiber
2390 bf6929a2 Alexander Schreiber
    if reboot_type not in [constants.INSTANCE_REBOOT_SOFT,
2391 bf6929a2 Alexander Schreiber
                           constants.INSTANCE_REBOOT_HARD,
2392 bf6929a2 Alexander Schreiber
                           constants.INSTANCE_REBOOT_FULL]:
2393 bf6929a2 Alexander Schreiber
      raise errors.ParameterError("reboot type not in [%s, %s, %s]" %
2394 bf6929a2 Alexander Schreiber
                                  (constants.INSTANCE_REBOOT_SOFT,
2395 bf6929a2 Alexander Schreiber
                                   constants.INSTANCE_REBOOT_HARD,
2396 bf6929a2 Alexander Schreiber
                                   constants.INSTANCE_REBOOT_FULL))
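    # soft and hard reboots are delegated to the hypervisor on the primary
    # node; a full reboot is emulated by shutting the instance down,
    # recycling its disks and starting it again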
2397 bf6929a2 Alexander Schreiber
2398 bf6929a2 Alexander Schreiber
    if reboot_type in [constants.INSTANCE_REBOOT_SOFT,
2399 bf6929a2 Alexander Schreiber
                       constants.INSTANCE_REBOOT_HARD]:
2400 bf6929a2 Alexander Schreiber
      if not rpc.call_instance_reboot(node_current, instance,
2401 bf6929a2 Alexander Schreiber
                                      reboot_type, extra_args):
2402 bf6929a2 Alexander Schreiber
        raise errors.OpExecError("Could not reboot instance")
2403 bf6929a2 Alexander Schreiber
    else:
2404 bf6929a2 Alexander Schreiber
      if not rpc.call_instance_shutdown(node_current, instance):
2405 bf6929a2 Alexander Schreiber
        raise errors.OpExecError("could not shutdown instance for full reboot")
2406 bf6929a2 Alexander Schreiber
      _ShutdownInstanceDisks(instance, self.cfg)
2407 bf6929a2 Alexander Schreiber
      _StartInstanceDisks(self.cfg, instance, ignore_secondaries)
2408 bf6929a2 Alexander Schreiber
      if not rpc.call_instance_start(node_current, instance, extra_args):
2409 bf6929a2 Alexander Schreiber
        _ShutdownInstanceDisks(instance, self.cfg)
2410 bf6929a2 Alexander Schreiber
        raise errors.OpExecError("Could not start instance for full reboot")
2411 bf6929a2 Alexander Schreiber
2412 bf6929a2 Alexander Schreiber
    self.cfg.MarkInstanceUp(instance.name)
2413 bf6929a2 Alexander Schreiber
2414 bf6929a2 Alexander Schreiber
2415 a8083063 Iustin Pop
class LUShutdownInstance(LogicalUnit):
2416 a8083063 Iustin Pop
  """Shutdown an instance.
2417 a8083063 Iustin Pop

2418 a8083063 Iustin Pop
  """
2419 a8083063 Iustin Pop
  HPATH = "instance-stop"
2420 a8083063 Iustin Pop
  HTYPE = constants.HTYPE_INSTANCE
2421 a8083063 Iustin Pop
  _OP_REQP = ["instance_name"]
2422 a8083063 Iustin Pop
2423 a8083063 Iustin Pop
  def BuildHooksEnv(self):
2424 a8083063 Iustin Pop
    """Build hooks env.
2425 a8083063 Iustin Pop

2426 a8083063 Iustin Pop
    This runs on master, primary and secondary nodes of the instance.
2427 a8083063 Iustin Pop

2428 a8083063 Iustin Pop
    """
2429 396e1b78 Michael Hanselmann
    env = _BuildInstanceHookEnvByObject(self.instance)
2430 880478f8 Iustin Pop
    nl = ([self.sstore.GetMasterNode(), self.instance.primary_node] +
2431 a8083063 Iustin Pop
          list(self.instance.secondary_nodes))
2432 a8083063 Iustin Pop
    return env, nl, nl
2433 a8083063 Iustin Pop
2434 a8083063 Iustin Pop
  def CheckPrereq(self):
2435 a8083063 Iustin Pop
    """Check prerequisites.
2436 a8083063 Iustin Pop

2437 a8083063 Iustin Pop
    This checks that the instance is in the cluster.
2438 a8083063 Iustin Pop

2439 a8083063 Iustin Pop
    """
2440 a8083063 Iustin Pop
    instance = self.cfg.GetInstanceInfo(
2441 a8083063 Iustin Pop
      self.cfg.ExpandInstanceName(self.op.instance_name))
2442 a8083063 Iustin Pop
    if instance is None:
2443 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("Instance '%s' not known" %
2444 3ecf6786 Iustin Pop
                                 self.op.instance_name)
2445 a8083063 Iustin Pop
    self.instance = instance
2446 a8083063 Iustin Pop
2447 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
2448 a8083063 Iustin Pop
    """Shutdown the instance.
2449 a8083063 Iustin Pop

2450 a8083063 Iustin Pop
    """
2451 a8083063 Iustin Pop
    instance = self.instance
2452 a8083063 Iustin Pop
    node_current = instance.primary_node
2453 fe482621 Iustin Pop
    self.cfg.MarkInstanceDown(instance.name)
2454 a8083063 Iustin Pop
    if not rpc.call_instance_shutdown(node_current, instance):
2455 a8083063 Iustin Pop
      logger.Error("could not shutdown instance")
2456 a8083063 Iustin Pop
2457 a8083063 Iustin Pop
    _ShutdownInstanceDisks(instance, self.cfg)
2458 a8083063 Iustin Pop
2459 a8083063 Iustin Pop
2460 fe7b0351 Michael Hanselmann
class LUReinstallInstance(LogicalUnit):
2461 fe7b0351 Michael Hanselmann
  """Reinstall an instance.
2462 fe7b0351 Michael Hanselmann

2463 fe7b0351 Michael Hanselmann
  """
2464 fe7b0351 Michael Hanselmann
  HPATH = "instance-reinstall"
2465 fe7b0351 Michael Hanselmann
  HTYPE = constants.HTYPE_INSTANCE
2466 fe7b0351 Michael Hanselmann
  _OP_REQP = ["instance_name"]
2467 fe7b0351 Michael Hanselmann
2468 fe7b0351 Michael Hanselmann
  def BuildHooksEnv(self):
2469 fe7b0351 Michael Hanselmann
    """Build hooks env.
2470 fe7b0351 Michael Hanselmann

2471 fe7b0351 Michael Hanselmann
    This runs on master, primary and secondary nodes of the instance.
2472 fe7b0351 Michael Hanselmann

2473 fe7b0351 Michael Hanselmann
    """
2474 396e1b78 Michael Hanselmann
    env = _BuildInstanceHookEnvByObject(self.instance)
2475 fe7b0351 Michael Hanselmann
    nl = ([self.sstore.GetMasterNode(), self.instance.primary_node] +
2476 fe7b0351 Michael Hanselmann
          list(self.instance.secondary_nodes))
2477 fe7b0351 Michael Hanselmann
    return env, nl, nl
2478 fe7b0351 Michael Hanselmann
2479 fe7b0351 Michael Hanselmann
  def CheckPrereq(self):
2480 fe7b0351 Michael Hanselmann
    """Check prerequisites.
2481 fe7b0351 Michael Hanselmann

2482 fe7b0351 Michael Hanselmann
    This checks that the instance is in the cluster and is not running.
2483 fe7b0351 Michael Hanselmann

2484 fe7b0351 Michael Hanselmann
    """
2485 fe7b0351 Michael Hanselmann
    instance = self.cfg.GetInstanceInfo(
2486 fe7b0351 Michael Hanselmann
      self.cfg.ExpandInstanceName(self.op.instance_name))
2487 fe7b0351 Michael Hanselmann
    if instance is None:
2488 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("Instance '%s' not known" %
2489 3ecf6786 Iustin Pop
                                 self.op.instance_name)
2490 fe7b0351 Michael Hanselmann
    if instance.disk_template == constants.DT_DISKLESS:
2491 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("Instance '%s' has no disks" %
2492 3ecf6786 Iustin Pop
                                 self.op.instance_name)
2493 fe7b0351 Michael Hanselmann
    if instance.status != "down":
2494 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("Instance '%s' is marked to be up" %
2495 3ecf6786 Iustin Pop
                                 self.op.instance_name)
2496 fe7b0351 Michael Hanselmann
    remote_info = rpc.call_instance_info(instance.primary_node, instance.name)
2497 fe7b0351 Michael Hanselmann
    if remote_info:
2498 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("Instance '%s' is running on the node %s" %
2499 3ecf6786 Iustin Pop
                                 (self.op.instance_name,
2500 3ecf6786 Iustin Pop
                                  instance.primary_node))
2501 d0834de3 Michael Hanselmann
2502 d0834de3 Michael Hanselmann
    self.op.os_type = getattr(self.op, "os_type", None)
2503 d0834de3 Michael Hanselmann
    if self.op.os_type is not None:
2504 d0834de3 Michael Hanselmann
      # OS verification
2505 d0834de3 Michael Hanselmann
      pnode = self.cfg.GetNodeInfo(
2506 d0834de3 Michael Hanselmann
        self.cfg.ExpandNodeName(instance.primary_node))
2507 d0834de3 Michael Hanselmann
      if pnode is None:
2508 3ecf6786 Iustin Pop
        raise errors.OpPrereqError("Primary node '%s' is unknown" %
2509 3ecf6786 Iustin Pop
                                   self.op.pnode)
2510 00fe9e38 Guido Trotter
      os_obj = rpc.call_os_get(pnode.name, self.op.os_type)
2511 dfa96ded Guido Trotter
      if not os_obj:
2512 3ecf6786 Iustin Pop
        raise errors.OpPrereqError("OS '%s' not in supported OS list for"
2513 3ecf6786 Iustin Pop
                                   " primary node"  % self.op.os_type)
2514 d0834de3 Michael Hanselmann
2515 fe7b0351 Michael Hanselmann
    self.instance = instance
2516 fe7b0351 Michael Hanselmann
2517 fe7b0351 Michael Hanselmann
  def Exec(self, feedback_fn):
2518 fe7b0351 Michael Hanselmann
    """Reinstall the instance.
2519 fe7b0351 Michael Hanselmann

2520 fe7b0351 Michael Hanselmann
    """
2521 fe7b0351 Michael Hanselmann
    inst = self.instance
2522 fe7b0351 Michael Hanselmann
2523 d0834de3 Michael Hanselmann
    if self.op.os_type is not None:
2524 d0834de3 Michael Hanselmann
      feedback_fn("Changing OS to '%s'..." % self.op.os_type)
2525 d0834de3 Michael Hanselmann
      inst.os = self.op.os_type
2526 d0834de3 Michael Hanselmann
      self.cfg.AddInstance(inst)
2527 d0834de3 Michael Hanselmann
2528 fe7b0351 Michael Hanselmann
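    # with the disks activated, run the OS creation scripts; the disks are
    # shut down again even if the reinstall fails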
    _StartInstanceDisks(self.cfg, inst, None)
2529 fe7b0351 Michael Hanselmann
    try:
2530 fe7b0351 Michael Hanselmann
      feedback_fn("Running the instance OS create scripts...")
2531 fe7b0351 Michael Hanselmann
      if not rpc.call_instance_os_add(inst.primary_node, inst, "sda", "sdb"):
2532 f4bc1f2c Michael Hanselmann
        raise errors.OpExecError("Could not install OS for instance %s"
2533 f4bc1f2c Michael Hanselmann
                                 " on node %s" %
2534 3ecf6786 Iustin Pop
                                 (inst.name, inst.primary_node))
2535 fe7b0351 Michael Hanselmann
    finally:
2536 fe7b0351 Michael Hanselmann
      _ShutdownInstanceDisks(inst, self.cfg)
2537 fe7b0351 Michael Hanselmann
2538 fe7b0351 Michael Hanselmann
2539 decd5f45 Iustin Pop
class LURenameInstance(LogicalUnit):
2540 decd5f45 Iustin Pop
  """Rename an instance.
2541 decd5f45 Iustin Pop

2542 decd5f45 Iustin Pop
  """
2543 decd5f45 Iustin Pop
  HPATH = "instance-rename"
2544 decd5f45 Iustin Pop
  HTYPE = constants.HTYPE_INSTANCE
2545 decd5f45 Iustin Pop
  _OP_REQP = ["instance_name", "new_name"]
2546 decd5f45 Iustin Pop
2547 decd5f45 Iustin Pop
  def BuildHooksEnv(self):
2548 decd5f45 Iustin Pop
    """Build hooks env.
2549 decd5f45 Iustin Pop

2550 decd5f45 Iustin Pop
    This runs on master, primary and secondary nodes of the instance.
2551 decd5f45 Iustin Pop

2552 decd5f45 Iustin Pop
    """
2553 decd5f45 Iustin Pop
    env = _BuildInstanceHookEnvByObject(self.instance)
2554 decd5f45 Iustin Pop
    env["INSTANCE_NEW_NAME"] = self.op.new_name
2555 decd5f45 Iustin Pop
    nl = ([self.sstore.GetMasterNode(), self.instance.primary_node] +
2556 decd5f45 Iustin Pop
          list(self.instance.secondary_nodes))
2557 decd5f45 Iustin Pop
    return env, nl, nl
2558 decd5f45 Iustin Pop
2559 decd5f45 Iustin Pop
  def CheckPrereq(self):
2560 decd5f45 Iustin Pop
    """Check prerequisites.
2561 decd5f45 Iustin Pop

2562 decd5f45 Iustin Pop
    This checks that the instance is in the cluster and is not running.
2563 decd5f45 Iustin Pop

2564 decd5f45 Iustin Pop
    """
2565 decd5f45 Iustin Pop
    instance = self.cfg.GetInstanceInfo(
2566 decd5f45 Iustin Pop
      self.cfg.ExpandInstanceName(self.op.instance_name))
2567 decd5f45 Iustin Pop
    if instance is None:
2568 decd5f45 Iustin Pop
      raise errors.OpPrereqError("Instance '%s' not known" %
2569 decd5f45 Iustin Pop
                                 self.op.instance_name)
2570 decd5f45 Iustin Pop
    if instance.status != "down":
2571 decd5f45 Iustin Pop
      raise errors.OpPrereqError("Instance '%s' is marked to be up" %
2572 decd5f45 Iustin Pop
                                 self.op.instance_name)
2573 decd5f45 Iustin Pop
    remote_info = rpc.call_instance_info(instance.primary_node, instance.name)
2574 decd5f45 Iustin Pop
    if remote_info:
2575 decd5f45 Iustin Pop
      raise errors.OpPrereqError("Instance '%s' is running on the node %s" %
2576 decd5f45 Iustin Pop
                                 (self.op.instance_name,
2577 decd5f45 Iustin Pop
                                  instance.primary_node))
2578 decd5f45 Iustin Pop
    self.instance = instance
2579 decd5f45 Iustin Pop
2580 decd5f45 Iustin Pop
    # new name verification
2581 89e1fc26 Iustin Pop
    name_info = utils.HostInfo(self.op.new_name)
2582 decd5f45 Iustin Pop
2583 89e1fc26 Iustin Pop
    self.op.new_name = new_name = name_info.name
2584 7bde3275 Guido Trotter
    instance_list = self.cfg.GetInstanceList()
2585 7bde3275 Guido Trotter
    if new_name in instance_list:
2586 7bde3275 Guido Trotter
      raise errors.OpPrereqError("Instance '%s' is already in the cluster" %
2587 c09f363f Manuel Franceschini
                                 new_name)
2588 7bde3275 Guido Trotter
2589 decd5f45 Iustin Pop
    if not getattr(self.op, "ignore_ip", False):
2590 89e1fc26 Iustin Pop
      command = ["fping", "-q", name_info.ip]
2591 decd5f45 Iustin Pop
      result = utils.RunCmd(command)
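      # fping exiting successfully means the new IP already answers, i.e.
      # it is in use by some other host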
2592 decd5f45 Iustin Pop
      if not result.failed:
2593 decd5f45 Iustin Pop
        raise errors.OpPrereqError("IP %s of instance %s already in use" %
2594 89e1fc26 Iustin Pop
                                   (name_info.ip, new_name))
2595 decd5f45 Iustin Pop
2596 decd5f45 Iustin Pop
2597 decd5f45 Iustin Pop
  def Exec(self, feedback_fn):
2598 decd5f45 Iustin Pop
    """Reinstall the instance.
2599 decd5f45 Iustin Pop

2600 decd5f45 Iustin Pop
    """
2601 decd5f45 Iustin Pop
    inst = self.instance
2602 decd5f45 Iustin Pop
    old_name = inst.name
2603 decd5f45 Iustin Pop
2604 b23c4333 Manuel Franceschini
    if inst.disk_template == constants.DT_FILE:
2605 b23c4333 Manuel Franceschini
      old_file_storage_dir = os.path.dirname(inst.disks[0].logical_id[1])
2606 b23c4333 Manuel Franceschini
2607 decd5f45 Iustin Pop
    self.cfg.RenameInstance(inst.name, self.op.new_name)
2608 decd5f45 Iustin Pop
2609 decd5f45 Iustin Pop
    # re-read the instance from the configuration after rename
2610 decd5f45 Iustin Pop
    inst = self.cfg.GetInstanceInfo(self.op.new_name)
2611 decd5f45 Iustin Pop
2612 b23c4333 Manuel Franceschini
    if inst.disk_template == constants.DT_FILE:
2613 b23c4333 Manuel Franceschini
      new_file_storage_dir = os.path.dirname(inst.disks[0].logical_id[1])
2614 b23c4333 Manuel Franceschini
      result = rpc.call_file_storage_dir_rename(inst.primary_node,
2615 b23c4333 Manuel Franceschini
                                                old_file_storage_dir,
2616 b23c4333 Manuel Franceschini
                                                new_file_storage_dir)
2617 b23c4333 Manuel Franceschini
2618 b23c4333 Manuel Franceschini
      if not result:
2619 b23c4333 Manuel Franceschini
        raise errors.OpExecError("Could not connect to node '%s' to rename"
2620 b23c4333 Manuel Franceschini
                                 " directory '%s' to '%s' (but the instance"
2621 b23c4333 Manuel Franceschini
                                 " has been renamed in Ganeti)" % (
2622 b23c4333 Manuel Franceschini
                                 inst.primary_node, old_file_storage_dir,
2623 b23c4333 Manuel Franceschini
                                 new_file_storage_dir))
2624 b23c4333 Manuel Franceschini
2625 b23c4333 Manuel Franceschini
      if not result[0]:
2626 b23c4333 Manuel Franceschini
        raise errors.OpExecError("Could not rename directory '%s' to '%s'"
2627 b23c4333 Manuel Franceschini
                                 " (but the instance has been renamed in"
2628 b23c4333 Manuel Franceschini
                                 " Ganeti)" % (old_file_storage_dir,
2629 b23c4333 Manuel Franceschini
                                               new_file_storage_dir))
2630 b23c4333 Manuel Franceschini
2631 decd5f45 Iustin Pop
    _StartInstanceDisks(self.cfg, inst, None)
2632 decd5f45 Iustin Pop
    try:
2633 decd5f45 Iustin Pop
      if not rpc.call_instance_run_rename(inst.primary_node, inst, old_name,
2634 decd5f45 Iustin Pop
                                          "sda", "sdb"):
2635 f4bc1f2c Michael Hanselmann
        msg = ("Could run OS rename script for instance %s on node %s (but the"
2636 f4bc1f2c Michael Hanselmann
               " instance has been renamed in Ganeti)" %
2637 decd5f45 Iustin Pop
               (inst.name, inst.primary_node))
2638 decd5f45 Iustin Pop
        logger.Error(msg)
2639 decd5f45 Iustin Pop
    finally:
2640 decd5f45 Iustin Pop
      _ShutdownInstanceDisks(inst, self.cfg)
2641 decd5f45 Iustin Pop
2642 decd5f45 Iustin Pop
2643 a8083063 Iustin Pop
class LURemoveInstance(LogicalUnit):
2644 a8083063 Iustin Pop
  """Remove an instance.
2645 a8083063 Iustin Pop

2646 a8083063 Iustin Pop
  """
2647 a8083063 Iustin Pop
  HPATH = "instance-remove"
2648 a8083063 Iustin Pop
  HTYPE = constants.HTYPE_INSTANCE
2649 a8083063 Iustin Pop
  _OP_REQP = ["instance_name"]
2650 a8083063 Iustin Pop
2651 a8083063 Iustin Pop
  def BuildHooksEnv(self):
2652 a8083063 Iustin Pop
    """Build hooks env.
2653 a8083063 Iustin Pop

2654 a8083063 Iustin Pop
    This runs on master, primary and secondary nodes of the instance.
2655 a8083063 Iustin Pop

2656 a8083063 Iustin Pop
    """
2657 396e1b78 Michael Hanselmann
    env = _BuildInstanceHookEnvByObject(self.instance)
2658 1d67656e Iustin Pop
    nl = [self.sstore.GetMasterNode()]
2659 a8083063 Iustin Pop
    return env, nl, nl
2660 a8083063 Iustin Pop
2661 a8083063 Iustin Pop
  def CheckPrereq(self):
2662 a8083063 Iustin Pop
    """Check prerequisites.
2663 a8083063 Iustin Pop

2664 a8083063 Iustin Pop
    This checks that the instance is in the cluster.
2665 a8083063 Iustin Pop

2666 a8083063 Iustin Pop
    """
2667 a8083063 Iustin Pop
    instance = self.cfg.GetInstanceInfo(
2668 a8083063 Iustin Pop
      self.cfg.ExpandInstanceName(self.op.instance_name))
2669 a8083063 Iustin Pop
    if instance is None:
2670 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("Instance '%s' not known" %
2671 3ecf6786 Iustin Pop
                                 self.op.instance_name)
2672 a8083063 Iustin Pop
    self.instance = instance
2673 a8083063 Iustin Pop
2674 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
2675 a8083063 Iustin Pop
    """Remove the instance.
2676 a8083063 Iustin Pop

2677 a8083063 Iustin Pop
    """
2678 a8083063 Iustin Pop
    instance = self.instance
2679 a8083063 Iustin Pop
    logger.Info("shutting down instance %s on node %s" %
2680 a8083063 Iustin Pop
                (instance.name, instance.primary_node))
2681 a8083063 Iustin Pop
2682 a8083063 Iustin Pop
    if not rpc.call_instance_shutdown(instance.primary_node, instance):
2683 1d67656e Iustin Pop
      if self.op.ignore_failures:
2684 1d67656e Iustin Pop
        feedback_fn("Warning: can't shutdown instance")
2685 1d67656e Iustin Pop
      else:
2686 1d67656e Iustin Pop
        raise errors.OpExecError("Could not shutdown instance %s on node %s" %
2687 1d67656e Iustin Pop
                                 (instance.name, instance.primary_node))
2688 a8083063 Iustin Pop
2689 a8083063 Iustin Pop
    logger.Info("removing block devices for instance %s" % instance.name)
2690 a8083063 Iustin Pop
2691 1d67656e Iustin Pop
    if not _RemoveDisks(instance, self.cfg):
2692 1d67656e Iustin Pop
      if self.op.ignore_failures:
2693 1d67656e Iustin Pop
        feedback_fn("Warning: can't remove instance's disks")
2694 1d67656e Iustin Pop
      else:
2695 1d67656e Iustin Pop
        raise errors.OpExecError("Can't remove instance's disks")
2696 a8083063 Iustin Pop
2697 a8083063 Iustin Pop
    logger.Info("removing instance %s out of cluster config" % instance.name)
2698 a8083063 Iustin Pop
2699 a8083063 Iustin Pop
    self.cfg.RemoveInstance(instance.name)
2700 a8083063 Iustin Pop
2701 a8083063 Iustin Pop
2702 a8083063 Iustin Pop
class LUQueryInstances(NoHooksLU):
2703 a8083063 Iustin Pop
  """Logical unit for querying instances.
2704 a8083063 Iustin Pop

2705 a8083063 Iustin Pop
  """
2706 069dcc86 Iustin Pop
  _OP_REQP = ["output_fields", "names"]
2707 a8083063 Iustin Pop
2708 a8083063 Iustin Pop
  def CheckPrereq(self):
2709 a8083063 Iustin Pop
    """Check prerequisites.
2710 a8083063 Iustin Pop

2711 a8083063 Iustin Pop
    This checks that the fields required are valid output fields.
2712 a8083063 Iustin Pop

2713 a8083063 Iustin Pop
    """
2714 d8052456 Iustin Pop
    self.dynamic_fields = frozenset(["oper_state", "oper_ram", "status"])
2715 dcb93971 Michael Hanselmann
    _CheckOutputFields(static=["name", "os", "pnode", "snodes",
2716 dcb93971 Michael Hanselmann
                               "admin_state", "admin_ram",
2717 644eeef9 Iustin Pop
                               "disk_template", "ip", "mac", "bridge",
2718 d6d415e8 Iustin Pop
                               "sda_size", "sdb_size", "vcpus"],
2719 dcb93971 Michael Hanselmann
                       dynamic=self.dynamic_fields,
2720 dcb93971 Michael Hanselmann
                       selected=self.op.output_fields)
2721 a8083063 Iustin Pop
2722 069dcc86 Iustin Pop
    self.wanted = _GetWantedInstances(self, self.op.names)
2723 069dcc86 Iustin Pop
2724 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
2725 a8083063 Iustin Pop
    """Computes the list of nodes and their attributes.
2726 a8083063 Iustin Pop

2727 a8083063 Iustin Pop
    """
2728 069dcc86 Iustin Pop
    instance_names = self.wanted
2729 a8083063 Iustin Pop
    instance_list = [self.cfg.GetInstanceInfo(iname) for iname
2730 a8083063 Iustin Pop
                     in instance_names]
2731 a8083063 Iustin Pop
2732 a8083063 Iustin Pop
    # begin data gathering
2733 a8083063 Iustin Pop
2734 a8083063 Iustin Pop
    nodes = frozenset([inst.primary_node for inst in instance_list])
2735 a8083063 Iustin Pop
2736 a8083063 Iustin Pop
    bad_nodes = []
2737 a8083063 Iustin Pop
    if self.dynamic_fields.intersection(self.op.output_fields):
2738 a8083063 Iustin Pop
      live_data = {}
2739 a8083063 Iustin Pop
      node_data = rpc.call_all_instances_info(nodes)
2740 a8083063 Iustin Pop
      for name in nodes:
2741 a8083063 Iustin Pop
        result = node_data[name]
2742 a8083063 Iustin Pop
        if result:
2743 a8083063 Iustin Pop
          live_data.update(result)
2744 a8083063 Iustin Pop
        elif result == False:
2745 a8083063 Iustin Pop
          bad_nodes.append(name)
2746 a8083063 Iustin Pop
        # else no instance is alive
2747 a8083063 Iustin Pop
    else:
2748 a8083063 Iustin Pop
      live_data = dict([(name, {}) for name in instance_names])
2749 a8083063 Iustin Pop
2750 a8083063 Iustin Pop
    # end data gathering
2751 a8083063 Iustin Pop
2752 a8083063 Iustin Pop
    output = []
2753 a8083063 Iustin Pop
    for instance in instance_list:
2754 a8083063 Iustin Pop
      iout = []
2755 a8083063 Iustin Pop
      for field in self.op.output_fields:
2756 a8083063 Iustin Pop
        if field == "name":
2757 a8083063 Iustin Pop
          val = instance.name
2758 a8083063 Iustin Pop
        elif field == "os":
2759 a8083063 Iustin Pop
          val = instance.os
2760 a8083063 Iustin Pop
        elif field == "pnode":
2761 a8083063 Iustin Pop
          val = instance.primary_node
2762 a8083063 Iustin Pop
        elif field == "snodes":
2763 8a23d2d3 Iustin Pop
          val = list(instance.secondary_nodes)
2764 a8083063 Iustin Pop
        elif field == "admin_state":
2765 8a23d2d3 Iustin Pop
          val = (instance.status != "down")
2766 a8083063 Iustin Pop
        elif field == "oper_state":
2767 a8083063 Iustin Pop
          if instance.primary_node in bad_nodes:
2768 8a23d2d3 Iustin Pop
            val = None
2769 a8083063 Iustin Pop
          else:
2770 8a23d2d3 Iustin Pop
            val = bool(live_data.get(instance.name))
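        # the status field summarizes admin intent vs. observed state:
        # "running" and "ADMIN_down" are the consistent states, "ERROR_up"
        # and "ERROR_down" flag a mismatch, and "ERROR_nodedown" means the
        # primary node could not be contacted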
2771 d8052456 Iustin Pop
        elif field == "status":
2772 d8052456 Iustin Pop
          if instance.primary_node in bad_nodes:
2773 d8052456 Iustin Pop
            val = "ERROR_nodedown"
2774 d8052456 Iustin Pop
          else:
2775 d8052456 Iustin Pop
            running = bool(live_data.get(instance.name))
2776 d8052456 Iustin Pop
            if running:
2777 d8052456 Iustin Pop
              if instance.status != "down":
2778 d8052456 Iustin Pop
                val = "running"
2779 d8052456 Iustin Pop
              else:
2780 d8052456 Iustin Pop
                val = "ERROR_up"
2781 d8052456 Iustin Pop
            else:
2782 d8052456 Iustin Pop
              if instance.status != "down":
2783 d8052456 Iustin Pop
                val = "ERROR_down"
2784 d8052456 Iustin Pop
              else:
2785 d8052456 Iustin Pop
                val = "ADMIN_down"
2786 a8083063 Iustin Pop
        elif field == "admin_ram":
2787 a8083063 Iustin Pop
          val = instance.memory
2788 a8083063 Iustin Pop
        elif field == "oper_ram":
2789 a8083063 Iustin Pop
          if instance.primary_node in bad_nodes:
2790 8a23d2d3 Iustin Pop
            val = None
2791 a8083063 Iustin Pop
          elif instance.name in live_data:
2792 a8083063 Iustin Pop
            val = live_data[instance.name].get("memory", "?")
2793 a8083063 Iustin Pop
          else:
2794 a8083063 Iustin Pop
            val = "-"
2795 a8083063 Iustin Pop
        elif field == "disk_template":
2796 a8083063 Iustin Pop
          val = instance.disk_template
2797 a8083063 Iustin Pop
        elif field == "ip":
2798 a8083063 Iustin Pop
          val = instance.nics[0].ip
2799 a8083063 Iustin Pop
        elif field == "bridge":
2800 a8083063 Iustin Pop
          val = instance.nics[0].bridge
2801 a8083063 Iustin Pop
        elif field == "mac":
2802 a8083063 Iustin Pop
          val = instance.nics[0].mac
2803 644eeef9 Iustin Pop
        elif field == "sda_size" or field == "sdb_size":
2804 644eeef9 Iustin Pop
          disk = instance.FindDisk(field[:3])
2805 644eeef9 Iustin Pop
          if disk is None:
2806 8a23d2d3 Iustin Pop
            val = None
2807 644eeef9 Iustin Pop
          else:
2808 644eeef9 Iustin Pop
            val = disk.size
2809 d6d415e8 Iustin Pop
        elif field == "vcpus":
2810 d6d415e8 Iustin Pop
          val = instance.vcpus
2811 a8083063 Iustin Pop
        else:
2812 3ecf6786 Iustin Pop
          raise errors.ParameterError(field)
2813 a8083063 Iustin Pop
        iout.append(val)
2814 a8083063 Iustin Pop
      output.append(iout)
2815 a8083063 Iustin Pop
2816 a8083063 Iustin Pop
    return output
2817 a8083063 Iustin Pop
2818 a8083063 Iustin Pop
2819 a8083063 Iustin Pop
class LUFailoverInstance(LogicalUnit):
2820 a8083063 Iustin Pop
  """Failover an instance.
2821 a8083063 Iustin Pop

2822 a8083063 Iustin Pop
  """
2823 a8083063 Iustin Pop
  HPATH = "instance-failover"
2824 a8083063 Iustin Pop
  HTYPE = constants.HTYPE_INSTANCE
2825 a8083063 Iustin Pop
  _OP_REQP = ["instance_name", "ignore_consistency"]
2826 a8083063 Iustin Pop
2827 a8083063 Iustin Pop
  def BuildHooksEnv(self):
2828 a8083063 Iustin Pop
    """Build hooks env.
2829 a8083063 Iustin Pop

2830 a8083063 Iustin Pop
    This runs on master, primary and secondary nodes of the instance.
2831 a8083063 Iustin Pop

2832 a8083063 Iustin Pop
    """
2833 a8083063 Iustin Pop
    env = {
2834 a8083063 Iustin Pop
      "IGNORE_CONSISTENCY": self.op.ignore_consistency,
2835 a8083063 Iustin Pop
      }
2836 396e1b78 Michael Hanselmann
    env.update(_BuildInstanceHookEnvByObject(self.instance))
2837 880478f8 Iustin Pop
    nl = [self.sstore.GetMasterNode()] + list(self.instance.secondary_nodes)
2838 a8083063 Iustin Pop
    return env, nl, nl
2839 a8083063 Iustin Pop
2840 a8083063 Iustin Pop
  def CheckPrereq(self):
2841 a8083063 Iustin Pop
    """Check prerequisites.
2842 a8083063 Iustin Pop

2843 a8083063 Iustin Pop
    This checks that the instance is in the cluster.
2844 a8083063 Iustin Pop

2845 a8083063 Iustin Pop
    """
2846 a8083063 Iustin Pop
    instance = self.cfg.GetInstanceInfo(
2847 a8083063 Iustin Pop
      self.cfg.ExpandInstanceName(self.op.instance_name))
2848 a8083063 Iustin Pop
    if instance is None:
2849 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("Instance '%s' not known" %
2850 3ecf6786 Iustin Pop
                                 self.op.instance_name)
2851 a8083063 Iustin Pop
2852 a1f445d3 Iustin Pop
    if instance.disk_template not in constants.DTS_NET_MIRROR:
2853 2a710df1 Michael Hanselmann
      raise errors.OpPrereqError("Instance's disk layout is not"
2854 a1f445d3 Iustin Pop
                                 " network mirrored, cannot failover.")
2855 2a710df1 Michael Hanselmann
2856 2a710df1 Michael Hanselmann
    secondary_nodes = instance.secondary_nodes
2857 2a710df1 Michael Hanselmann
    if not secondary_nodes:
2858 2a710df1 Michael Hanselmann
      raise errors.ProgrammerError("no secondary node but using "
2859 2a710df1 Michael Hanselmann
                                   "DT_REMOTE_RAID1 template")
2860 2a710df1 Michael Hanselmann
2861 2a710df1 Michael Hanselmann
    target_node = secondary_nodes[0]
2862 d4f16fd9 Iustin Pop
    # check memory requirements on the secondary node
2863 d4f16fd9 Iustin Pop
    _CheckNodeFreeMemory(self.cfg, target_node, "failing over instance %s" %
2864 d4f16fd9 Iustin Pop
                         instance.name, instance.memory)
2865 3a7c308e Guido Trotter
2866 a8083063 Iustin Pop
    # check bridge existence
2867 a8083063 Iustin Pop
    brlist = [nic.bridge for nic in instance.nics]
2868 50ff9a7a Iustin Pop
    if not rpc.call_bridges_exist(target_node, brlist):
2869 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("One or more target bridges %s does not"
2870 3ecf6786 Iustin Pop
                                 " exist on destination node '%s'" %
2871 50ff9a7a Iustin Pop
                                 (brlist, target_node))
2872 a8083063 Iustin Pop
2873 a8083063 Iustin Pop
    self.instance = instance
2874 a8083063 Iustin Pop
2875 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
2876 a8083063 Iustin Pop
    """Failover an instance.
2877 a8083063 Iustin Pop

2878 a8083063 Iustin Pop
    The failover is done by shutting it down on its present node and
2879 a8083063 Iustin Pop
    starting it on the secondary.
2880 a8083063 Iustin Pop

2881 a8083063 Iustin Pop
    """
2882 a8083063 Iustin Pop
    instance = self.instance
2883 a8083063 Iustin Pop
2884 a8083063 Iustin Pop
    source_node = instance.primary_node
2885 a8083063 Iustin Pop
    target_node = instance.secondary_nodes[0]
2886 a8083063 Iustin Pop
2887 a8083063 Iustin Pop
    feedback_fn("* checking disk consistency between source and target")
2888 a8083063 Iustin Pop
    for dev in instance.disks:
2889 a8083063 Iustin Pop
      # for remote_raid1, these are md over drbd
2890 a8083063 Iustin Pop
      if not _CheckDiskConsistency(self.cfg, dev, target_node, False):
2891 a0aaa0d0 Guido Trotter
        if instance.status == "up" and not self.op.ignore_consistency:
2892 3ecf6786 Iustin Pop
          raise errors.OpExecError("Disk %s is degraded on target node,"
2893 3ecf6786 Iustin Pop
                                   " aborting failover." % dev.iv_name)
2894 a8083063 Iustin Pop
2895 a8083063 Iustin Pop
    feedback_fn("* shutting down instance on source node")
2896 a8083063 Iustin Pop
    logger.Info("Shutting down instance %s on node %s" %
2897 a8083063 Iustin Pop
                (instance.name, source_node))
2898 a8083063 Iustin Pop
2899 a8083063 Iustin Pop
    if not rpc.call_instance_shutdown(source_node, instance):
2900 24a40d57 Iustin Pop
      if self.op.ignore_consistency:
2901 24a40d57 Iustin Pop
        logger.Error("Could not shutdown instance %s on node %s. Proceeding"
2902 24a40d57 Iustin Pop
                     " anyway. Please make sure node %s is down"  %
2903 24a40d57 Iustin Pop
                     (instance.name, source_node, source_node))
2904 24a40d57 Iustin Pop
      else:
2905 24a40d57 Iustin Pop
        raise errors.OpExecError("Could not shutdown instance %s on node %s" %
2906 24a40d57 Iustin Pop
                                 (instance.name, source_node))
2907 a8083063 Iustin Pop
2908 a8083063 Iustin Pop
    feedback_fn("* deactivating the instance's disks on source node")
2909 a8083063 Iustin Pop
    if not _ShutdownInstanceDisks(instance, self.cfg, ignore_primary=True):
2910 3ecf6786 Iustin Pop
      raise errors.OpExecError("Can't shut down the instance's disks.")
2911 a8083063 Iustin Pop
2912 a8083063 Iustin Pop
    instance.primary_node = target_node
2913 a8083063 Iustin Pop
    # distribute new instance config to the other nodes
2914 a8083063 Iustin Pop
    self.cfg.AddInstance(instance)
2915 a8083063 Iustin Pop
2916 12a0cfbe Guido Trotter
    # Only start the instance if it's marked as up
2917 12a0cfbe Guido Trotter
    if instance.status == "up":
2918 12a0cfbe Guido Trotter
      feedback_fn("* activating the instance's disks on target node")
2919 12a0cfbe Guido Trotter
      logger.Info("Starting instance %s on node %s" %
2920 12a0cfbe Guido Trotter
                  (instance.name, target_node))
2921 12a0cfbe Guido Trotter
2922 12a0cfbe Guido Trotter
      disks_ok, dummy = _AssembleInstanceDisks(instance, self.cfg,
2923 12a0cfbe Guido Trotter
                                               ignore_secondaries=True)
2924 12a0cfbe Guido Trotter
      if not disks_ok:
2925 12a0cfbe Guido Trotter
        _ShutdownInstanceDisks(instance, self.cfg)
2926 12a0cfbe Guido Trotter
        raise errors.OpExecError("Can't activate the instance's disks")
2927 a8083063 Iustin Pop
2928 12a0cfbe Guido Trotter
      feedback_fn("* starting the instance on the target node")
2929 12a0cfbe Guido Trotter
      if not rpc.call_instance_start(target_node, instance, None):
2930 12a0cfbe Guido Trotter
        _ShutdownInstanceDisks(instance, self.cfg)
2931 12a0cfbe Guido Trotter
        raise errors.OpExecError("Could not start instance %s on node %s." %
2932 12a0cfbe Guido Trotter
                                 (instance.name, target_node))
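  # A minimal usage sketch (values are examples; this assumes the standard
  # opcodes.OpFailoverInstance fields consumed above, i.e. instance_name and
  # ignore_consistency, and an mcpu.Processor-style executor):
  #
  #   op = opcodes.OpFailoverInstance(instance_name="inst1.example.com",
  #                                   ignore_consistency=False)
  #   proc.ExecOpCode(op)
  #
  # Setting ignore_consistency=True makes the failover proceed even when the
  # mirror on the target node is degraded or the source node cannot be
  # reached to shut the instance down.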
2933 a8083063 Iustin Pop
2934 a8083063 Iustin Pop
2935 3f78eef2 Iustin Pop
def _CreateBlockDevOnPrimary(cfg, node, instance, device, info):
2936 a8083063 Iustin Pop
  """Create a tree of block devices on the primary node.
2937 a8083063 Iustin Pop

2938 a8083063 Iustin Pop
  This always creates all devices.
2939 a8083063 Iustin Pop

2940 a8083063 Iustin Pop
  """
2941 a8083063 Iustin Pop
  if device.children:
2942 a8083063 Iustin Pop
    for child in device.children:
2943 3f78eef2 Iustin Pop
      if not _CreateBlockDevOnPrimary(cfg, node, instance, child, info):
2944 a8083063 Iustin Pop
        return False
2945 a8083063 Iustin Pop
2946 a8083063 Iustin Pop
  cfg.SetDiskID(device, node)
2947 3f78eef2 Iustin Pop
  new_id = rpc.call_blockdev_create(node, device, device.size,
2948 3f78eef2 Iustin Pop
                                    instance.name, True, info)
2949 a8083063 Iustin Pop
  if not new_id:
2950 a8083063 Iustin Pop
    return False
2951 a8083063 Iustin Pop
  if device.physical_id is None:
2952 a8083063 Iustin Pop
    device.physical_id = new_id
2953 a8083063 Iustin Pop
  return True
2954 a8083063 Iustin Pop
2955 a8083063 Iustin Pop
2956 3f78eef2 Iustin Pop
def _CreateBlockDevOnSecondary(cfg, node, instance, device, force, info):
2957 a8083063 Iustin Pop
  """Create a tree of block devices on a secondary node.
2958 a8083063 Iustin Pop

2959 a8083063 Iustin Pop
  If this device type has to be created on secondaries, create it and
2960 a8083063 Iustin Pop
  all its children.
2961 a8083063 Iustin Pop

2962 a8083063 Iustin Pop
  If not, just recurse to children keeping the same 'force' value.
2963 a8083063 Iustin Pop

2964 a8083063 Iustin Pop
  """
2965 a8083063 Iustin Pop
  if device.CreateOnSecondary():
2966 a8083063 Iustin Pop
    force = True
2967 a8083063 Iustin Pop
  if device.children:
2968 a8083063 Iustin Pop
    for child in device.children:
2969 3f78eef2 Iustin Pop
      if not _CreateBlockDevOnSecondary(cfg, node, instance,
2970 3f78eef2 Iustin Pop
                                        child, force, info):
2971 a8083063 Iustin Pop
        return False
2972 a8083063 Iustin Pop
2973 a8083063 Iustin Pop
  if not force:
2974 a8083063 Iustin Pop
    return True
2975 a8083063 Iustin Pop
  cfg.SetDiskID(device, node)
2976 3f78eef2 Iustin Pop
  new_id = rpc.call_blockdev_create(node, device, device.size,
2977 3f78eef2 Iustin Pop
                                    instance.name, False, info)
2978 a8083063 Iustin Pop
  if not new_id:
2979 a8083063 Iustin Pop
    return False
2980 a8083063 Iustin Pop
  if device.physical_id is None:
2981 a8083063 Iustin Pop
    device.physical_id = new_id
2982 a8083063 Iustin Pop
  return True
2983 a8083063 Iustin Pop
2984 a8083063 Iustin Pop
2985 923b1523 Iustin Pop
def _GenerateUniqueNames(cfg, exts):
2986 923b1523 Iustin Pop
  """Generate a suitable LV name.
2987 923b1523 Iustin Pop

2988 923b1523 Iustin Pop
  This generates a unique logical volume name for each requested extension.
2989 923b1523 Iustin Pop

2990 923b1523 Iustin Pop
  """
2991 923b1523 Iustin Pop
  results = []
2992 923b1523 Iustin Pop
  for val in exts:
2993 923b1523 Iustin Pop
    new_id = cfg.GenerateUniqueID()
2994 923b1523 Iustin Pop
    results.append("%s%s" % (new_id, val))
2995 923b1523 Iustin Pop
  return results
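# A short illustration of the naming scheme (identifiers are invented here;
# real values come from cfg.GenerateUniqueID()):
#
#   _GenerateUniqueNames(cfg, [".sda", ".sdb"])
#   => ["<unique-id-1>.sda", "<unique-id-2>.sdb"]
#
# Each requested extension is simply appended to a freshly generated unique
# id, and the results are later used as LV names in the cluster volume group.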
2996 923b1523 Iustin Pop
2997 923b1523 Iustin Pop
2998 923b1523 Iustin Pop
def _GenerateMDDRBDBranch(cfg, primary, secondary, size, names):
2999 a8083063 Iustin Pop
  """Generate a drbd device complete with its children.
3000 a8083063 Iustin Pop

3001 a8083063 Iustin Pop
  """
3002 a8083063 Iustin Pop
  port = cfg.AllocatePort()
3003 923b1523 Iustin Pop
  vgname = cfg.GetVGName()
3004 fe96220b Iustin Pop
  dev_data = objects.Disk(dev_type=constants.LD_LV, size=size,
3005 923b1523 Iustin Pop
                          logical_id=(vgname, names[0]))
3006 fe96220b Iustin Pop
  dev_meta = objects.Disk(dev_type=constants.LD_LV, size=128,
3007 923b1523 Iustin Pop
                          logical_id=(vgname, names[1]))
3008 fe96220b Iustin Pop
  drbd_dev = objects.Disk(dev_type=constants.LD_DRBD7, size=size,
3009 a8083063 Iustin Pop
                          logical_id = (primary, secondary, port),
3010 a8083063 Iustin Pop
                          children = [dev_data, dev_meta])
3011 a8083063 Iustin Pop
  return drbd_dev
3012 a8083063 Iustin Pop
3013 a8083063 Iustin Pop
3014 a1f445d3 Iustin Pop
def _GenerateDRBD8Branch(cfg, primary, secondary, size, names, iv_name):
3015 a1f445d3 Iustin Pop
  """Generate a drbd8 device complete with its children.
3016 a1f445d3 Iustin Pop

3017 a1f445d3 Iustin Pop
  """
3018 a1f445d3 Iustin Pop
  port = cfg.AllocatePort()
3019 a1f445d3 Iustin Pop
  vgname = cfg.GetVGName()
3020 a1f445d3 Iustin Pop
  dev_data = objects.Disk(dev_type=constants.LD_LV, size=size,
3021 a1f445d3 Iustin Pop
                          logical_id=(vgname, names[0]))
3022 a1f445d3 Iustin Pop
  dev_meta = objects.Disk(dev_type=constants.LD_LV, size=128,
3023 a1f445d3 Iustin Pop
                          logical_id=(vgname, names[1]))
3024 a1f445d3 Iustin Pop
  drbd_dev = objects.Disk(dev_type=constants.LD_DRBD8, size=size,
3025 a1f445d3 Iustin Pop
                          logical_id = (primary, secondary, port),
3026 a1f445d3 Iustin Pop
                          children = [dev_data, dev_meta],
3027 a1f445d3 Iustin Pop
                          iv_name=iv_name)
3028 a1f445d3 Iustin Pop
  return drbd_dev
3029 a1f445d3 Iustin Pop
3030 7c0d6283 Michael Hanselmann
3031 923b1523 Iustin Pop
def _GenerateDiskTemplate(cfg, template_name,
3032 a8083063 Iustin Pop
                          instance_name, primary_node,
3033 0f1a06e3 Manuel Franceschini
                          secondary_nodes, disk_sz, swap_sz,
3034 0f1a06e3 Manuel Franceschini
                          file_storage_dir, file_driver):
3035 a8083063 Iustin Pop
  """Generate the entire disk layout for a given template type.
3036 a8083063 Iustin Pop

3037 a8083063 Iustin Pop
  """
3038 a8083063 Iustin Pop
  #TODO: compute space requirements
3039 a8083063 Iustin Pop
3040 923b1523 Iustin Pop
  vgname = cfg.GetVGName()
3041 3517d9b9 Manuel Franceschini
  if template_name == constants.DT_DISKLESS:
3042 a8083063 Iustin Pop
    disks = []
3043 3517d9b9 Manuel Franceschini
  elif template_name == constants.DT_PLAIN:
3044 a8083063 Iustin Pop
    if len(secondary_nodes) != 0:
3045 a8083063 Iustin Pop
      raise errors.ProgrammerError("Wrong template configuration")
3046 923b1523 Iustin Pop
3047 923b1523 Iustin Pop
    names = _GenerateUniqueNames(cfg, [".sda", ".sdb"])
3048 fe96220b Iustin Pop
    sda_dev = objects.Disk(dev_type=constants.LD_LV, size=disk_sz,
3049 923b1523 Iustin Pop
                           logical_id=(vgname, names[0]),
3050 a8083063 Iustin Pop
                           iv_name = "sda")
3051 fe96220b Iustin Pop
    sdb_dev = objects.Disk(dev_type=constants.LD_LV, size=swap_sz,
3052 923b1523 Iustin Pop
                           logical_id=(vgname, names[1]),
3053 a8083063 Iustin Pop
                           iv_name = "sdb")
3054 a8083063 Iustin Pop
    disks = [sda_dev, sdb_dev]
3055 a1f445d3 Iustin Pop
  elif template_name == constants.DT_DRBD8:
3056 a1f445d3 Iustin Pop
    if len(secondary_nodes) != 1:
3057 a1f445d3 Iustin Pop
      raise errors.ProgrammerError("Wrong template configuration")
3058 a1f445d3 Iustin Pop
    remote_node = secondary_nodes[0]
3059 a1f445d3 Iustin Pop
    names = _GenerateUniqueNames(cfg, [".sda_data", ".sda_meta",
3060 a1f445d3 Iustin Pop
                                       ".sdb_data", ".sdb_meta"])
3061 a1f445d3 Iustin Pop
    drbd_sda_dev = _GenerateDRBD8Branch(cfg, primary_node, remote_node,
3062 a1f445d3 Iustin Pop
                                         disk_sz, names[0:2], "sda")
3063 a1f445d3 Iustin Pop
    drbd_sdb_dev = _GenerateDRBD8Branch(cfg, primary_node, remote_node,
3064 a1f445d3 Iustin Pop
                                         swap_sz, names[2:4], "sdb")
3065 a1f445d3 Iustin Pop
    disks = [drbd_sda_dev, drbd_sdb_dev]
3066 0f1a06e3 Manuel Franceschini
  elif template_name == constants.DT_FILE:
3067 0f1a06e3 Manuel Franceschini
    if len(secondary_nodes) != 0:
3068 0f1a06e3 Manuel Franceschini
      raise errors.ProgrammerError("Wrong template configuration")
3069 0f1a06e3 Manuel Franceschini
3070 0f1a06e3 Manuel Franceschini
    file_sda_dev = objects.Disk(dev_type=constants.LD_FILE, size=disk_sz,
3071 0f1a06e3 Manuel Franceschini
                                iv_name="sda", logical_id=(file_driver,
3072 0f1a06e3 Manuel Franceschini
                                "%s/sda" % file_storage_dir))
3073 0f1a06e3 Manuel Franceschini
    file_sdb_dev = objects.Disk(dev_type=constants.LD_FILE, size=swap_sz,
3074 0f1a06e3 Manuel Franceschini
                                iv_name="sdb", logical_id=(file_driver,
3075 0f1a06e3 Manuel Franceschini
                                "%s/sdb" % file_storage_dir))
3076 0f1a06e3 Manuel Franceschini
    disks = [file_sda_dev, file_sdb_dev]
3077 a8083063 Iustin Pop
  else:
3078 a8083063 Iustin Pop
    raise errors.ProgrammerError("Invalid disk template '%s'" % template_name)
3079 a8083063 Iustin Pop
  return disks
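# Two sketched invocations, using made-up node and size values, to show the
# shape of the result for the common templates:
#
#   _GenerateDiskTemplate(cfg, constants.DT_PLAIN, "inst1", "node1",
#                         [], 10240, 2048, "", None)
#   => [<LD_LV "sda" 10240 MB>, <LD_LV "sdb" 2048 MB>]
#
#   _GenerateDiskTemplate(cfg, constants.DT_DRBD8, "inst1", "node1",
#                         ["node2"], 10240, 2048, "", None)
#   => [<LD_DRBD8 "sda" 10240 MB>, <LD_DRBD8 "sdb" 2048 MB>]
#
# i.e. the non-diskless templates produce exactly two devices, "sda" for data
# and "sdb" for swap, differing only in how (and on how many nodes) they are
# backed.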
3080 a8083063 Iustin Pop
3081 a8083063 Iustin Pop
3082 a0c3fea1 Michael Hanselmann
def _GetInstanceInfoText(instance):
3083 3ecf6786 Iustin Pop
  """Compute that text that should be added to the disk's metadata.
3084 3ecf6786 Iustin Pop

3085 3ecf6786 Iustin Pop
  """
3086 a0c3fea1 Michael Hanselmann
  return "originstname+%s" % instance.name
3087 a0c3fea1 Michael Hanselmann
3088 a0c3fea1 Michael Hanselmann
3089 a8083063 Iustin Pop
def _CreateDisks(cfg, instance):
3090 a8083063 Iustin Pop
  """Create all disks for an instance.
3091 a8083063 Iustin Pop

3092 a8083063 Iustin Pop
  This abstracts away some work from AddInstance.
3093 a8083063 Iustin Pop

3094 a8083063 Iustin Pop
  Args:
3095 a8083063 Iustin Pop
    instance: the instance object
3096 a8083063 Iustin Pop

3097 a8083063 Iustin Pop
  Returns:
3098 a8083063 Iustin Pop
    True or False showing the success of the creation process
3099 a8083063 Iustin Pop

3100 a8083063 Iustin Pop
  """
3101 a0c3fea1 Michael Hanselmann
  info = _GetInstanceInfoText(instance)
3102 a0c3fea1 Michael Hanselmann
3103 0f1a06e3 Manuel Franceschini
  if instance.disk_template == constants.DT_FILE:
3104 0f1a06e3 Manuel Franceschini
    file_storage_dir = os.path.dirname(instance.disks[0].logical_id[1])
3105 0f1a06e3 Manuel Franceschini
    result = rpc.call_file_storage_dir_create(instance.primary_node,
3106 0f1a06e3 Manuel Franceschini
                                              file_storage_dir)
3107 0f1a06e3 Manuel Franceschini
3108 0f1a06e3 Manuel Franceschini
    if not result:
3109 b62ddbe5 Guido Trotter
      logger.Error("Could not connect to node '%s'" % instance.primary_node)
3110 0f1a06e3 Manuel Franceschini
      return False
3111 0f1a06e3 Manuel Franceschini
3112 0f1a06e3 Manuel Franceschini
    if not result[0]:
3113 0f1a06e3 Manuel Franceschini
      logger.Error("failed to create directory '%s'" % file_storage_dir)
3114 0f1a06e3 Manuel Franceschini
      return False
3115 0f1a06e3 Manuel Franceschini
3116 a8083063 Iustin Pop
  for device in instance.disks:
3117 a8083063 Iustin Pop
    logger.Info("creating volume %s for instance %s" %
3118 1c6e3627 Manuel Franceschini
                (device.iv_name, instance.name))
3119 a8083063 Iustin Pop
    #HARDCODE
3120 a8083063 Iustin Pop
    for secondary_node in instance.secondary_nodes:
3121 3f78eef2 Iustin Pop
      if not _CreateBlockDevOnSecondary(cfg, secondary_node, instance,
3122 3f78eef2 Iustin Pop
                                        device, False, info):
3123 a8083063 Iustin Pop
        logger.Error("failed to create volume %s (%s) on secondary node %s!" %
3124 a8083063 Iustin Pop
                     (device.iv_name, device, secondary_node))
3125 a8083063 Iustin Pop
        return False
3126 a8083063 Iustin Pop
    #HARDCODE
3127 3f78eef2 Iustin Pop
    if not _CreateBlockDevOnPrimary(cfg, instance.primary_node,
3128 3f78eef2 Iustin Pop
                                    instance, device, info):
3129 a8083063 Iustin Pop
      logger.Error("failed to create volume %s on primary!" %
3130 a8083063 Iustin Pop
                   device.iv_name)
3131 a8083063 Iustin Pop
      return False
3132 1c6e3627 Manuel Franceschini
3133 a8083063 Iustin Pop
  return True
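# Note the ordering above: for each disk the volumes are first created on the
# secondary nodes (with force=False, so only device types that really need a
# secondary copy, such as DRBD and its children, are built there) and only
# then on the primary, so that by the time the primary-side device is created
# its peer already exists.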
3134 a8083063 Iustin Pop
3135 a8083063 Iustin Pop
3136 a8083063 Iustin Pop
def _RemoveDisks(instance, cfg):
3137 a8083063 Iustin Pop
  """Remove all disks for an instance.
3138 a8083063 Iustin Pop

3139 a8083063 Iustin Pop
  This abstracts away some work from `AddInstance()` and
3140 a8083063 Iustin Pop
  `RemoveInstance()`. Note that in case some of the devices couldn't
3141 1d67656e Iustin Pop
  be removed, the removal will continue with the other ones (compare
3142 a8083063 Iustin Pop
  with `_CreateDisks()`).
3143 a8083063 Iustin Pop

3144 a8083063 Iustin Pop
  Args:
3145 a8083063 Iustin Pop
    instance: the instance object
3146 a8083063 Iustin Pop

3147 a8083063 Iustin Pop
  Returns:
3148 a8083063 Iustin Pop
    True or False showing the success of the removal process
3149 a8083063 Iustin Pop

3150 a8083063 Iustin Pop
  """
3151 a8083063 Iustin Pop
  logger.Info("removing block devices for instance %s" % instance.name)
3152 a8083063 Iustin Pop
3153 a8083063 Iustin Pop
  result = True
3154 a8083063 Iustin Pop
  for device in instance.disks:
3155 a8083063 Iustin Pop
    for node, disk in device.ComputeNodeTree(instance.primary_node):
3156 a8083063 Iustin Pop
      cfg.SetDiskID(disk, node)
3157 a8083063 Iustin Pop
      if not rpc.call_blockdev_remove(node, disk):
3158 a8083063 Iustin Pop
        logger.Error("could not remove block device %s on node %s,"
3159 a8083063 Iustin Pop
                     " continuing anyway" %
3160 a8083063 Iustin Pop
                     (device.iv_name, node))
3161 a8083063 Iustin Pop
        result = False
3162 0f1a06e3 Manuel Franceschini
3163 0f1a06e3 Manuel Franceschini
  if instance.disk_template == constants.DT_FILE:
3164 0f1a06e3 Manuel Franceschini
    file_storage_dir = os.path.dirname(instance.disks[0].logical_id[1])
3165 0f1a06e3 Manuel Franceschini
    if not rpc.call_file_storage_dir_remove(instance.primary_node,
3166 0f1a06e3 Manuel Franceschini
                                            file_storage_dir):
3167 0f1a06e3 Manuel Franceschini
      logger.Error("could not remove directory '%s'" % file_storage_dir)
3168 0f1a06e3 Manuel Franceschini
      result = False
3169 0f1a06e3 Manuel Franceschini
3170 a8083063 Iustin Pop
  return result
3171 a8083063 Iustin Pop
3172 a8083063 Iustin Pop
3173 e2fe6369 Iustin Pop
def _ComputeDiskSize(disk_template, disk_size, swap_size):
3174 e2fe6369 Iustin Pop
  """Compute disk size requirements in the volume group
3175 e2fe6369 Iustin Pop

3176 e2fe6369 Iustin Pop
  This is currently hard-coded for the two-drive layout.
3177 e2fe6369 Iustin Pop

3178 e2fe6369 Iustin Pop
  """
3179 e2fe6369 Iustin Pop
  # Required free disk space as a function of disk and swap space
3180 e2fe6369 Iustin Pop
  req_size_dict = {
3181 e2fe6369 Iustin Pop
    constants.DT_DISKLESS: None,
3182 e2fe6369 Iustin Pop
    constants.DT_PLAIN: disk_size + swap_size,
3183 e2fe6369 Iustin Pop
    # 256 MB are added for drbd metadata, 128 MB for each drbd device
3184 e2fe6369 Iustin Pop
    constants.DT_DRBD8: disk_size + swap_size + 256,
3185 e2fe6369 Iustin Pop
    constants.DT_FILE: None,
3186 e2fe6369 Iustin Pop
  }
3187 e2fe6369 Iustin Pop
3188 e2fe6369 Iustin Pop
  if disk_template not in req_size_dict:
3189 e2fe6369 Iustin Pop
    raise errors.ProgrammerError("Disk template '%s' size requirement"
3190 e2fe6369 Iustin Pop
                                 " is unknown" %  disk_template)
3191 e2fe6369 Iustin Pop
3192 e2fe6369 Iustin Pop
  return req_size_dict[disk_template]
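# Worked example: a drbd8 instance with a 10240 MB disk and 2048 MB of swap
# needs 10240 + 2048 + 256 = 12544 MB of free space in the volume group on
# each of its two nodes, while diskless and file-backed instances consume no
# LVM space at all (hence the None entries above).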
3193 e2fe6369 Iustin Pop
3194 e2fe6369 Iustin Pop
3195 a8083063 Iustin Pop
class LUCreateInstance(LogicalUnit):
3196 a8083063 Iustin Pop
  """Create an instance.
3197 a8083063 Iustin Pop

3198 a8083063 Iustin Pop
  """
3199 a8083063 Iustin Pop
  HPATH = "instance-add"
3200 a8083063 Iustin Pop
  HTYPE = constants.HTYPE_INSTANCE
3201 538475ca Iustin Pop
  _OP_REQP = ["instance_name", "mem_size", "disk_size",
3202 a8083063 Iustin Pop
              "disk_template", "swap_size", "mode", "start", "vcpus",
3203 1862d460 Alexander Schreiber
              "wait_for_sync", "ip_check", "mac"]
3204 a8083063 Iustin Pop
3205 538475ca Iustin Pop
  def _RunAllocator(self):
3206 538475ca Iustin Pop
    """Run the allocator based on input opcode.
3207 538475ca Iustin Pop

3208 538475ca Iustin Pop
    """
3209 538475ca Iustin Pop
    disks = [{"size": self.op.disk_size, "mode": "w"},
3210 538475ca Iustin Pop
             {"size": self.op.swap_size, "mode": "w"}]
3211 538475ca Iustin Pop
    nics = [{"mac": self.op.mac, "ip": getattr(self.op, "ip", None),
3212 538475ca Iustin Pop
             "bridge": self.op.bridge}]
3213 d1c2dd75 Iustin Pop
    ial = IAllocator(self.cfg, self.sstore,
3214 29859cb7 Iustin Pop
                     mode=constants.IALLOCATOR_MODE_ALLOC,
3215 d1c2dd75 Iustin Pop
                     name=self.op.instance_name,
3216 d1c2dd75 Iustin Pop
                     disk_template=self.op.disk_template,
3217 d1c2dd75 Iustin Pop
                     tags=[],
3218 d1c2dd75 Iustin Pop
                     os=self.op.os_type,
3219 d1c2dd75 Iustin Pop
                     vcpus=self.op.vcpus,
3220 d1c2dd75 Iustin Pop
                     mem_size=self.op.mem_size,
3221 d1c2dd75 Iustin Pop
                     disks=disks,
3222 d1c2dd75 Iustin Pop
                     nics=nics,
3223 29859cb7 Iustin Pop
                     )
3224 d1c2dd75 Iustin Pop
3225 d1c2dd75 Iustin Pop
    ial.Run(self.op.iallocator)
3226 d1c2dd75 Iustin Pop
3227 d1c2dd75 Iustin Pop
    if not ial.success:
3228 538475ca Iustin Pop
      raise errors.OpPrereqError("Can't compute nodes using"
3229 538475ca Iustin Pop
                                 " iallocator '%s': %s" % (self.op.iallocator,
3230 d1c2dd75 Iustin Pop
                                                           ial.info))
3231 27579978 Iustin Pop
    if len(ial.nodes) != ial.required_nodes:
3232 538475ca Iustin Pop
      raise errors.OpPrereqError("iallocator '%s' returned invalid number"
3233 538475ca Iustin Pop
                                 " of nodes (%s), required %s" %
3234 27579978 Iustin Pop
                                 (self.op.iallocator, len(ial.nodes),
                                  ial.required_nodes))
3235 d1c2dd75 Iustin Pop
    self.op.pnode = ial.nodes[0]
3236 538475ca Iustin Pop
    logger.ToStdout("Selected nodes for the instance: %s" %
3237 d1c2dd75 Iustin Pop
                    (", ".join(ial.nodes),))
3238 538475ca Iustin Pop
    logger.Info("Selected nodes for instance %s via iallocator %s: %s" %
3239 d1c2dd75 Iustin Pop
                (self.op.instance_name, self.op.iallocator, ial.nodes))
3240 27579978 Iustin Pop
    if ial.required_nodes == 2:
3241 d1c2dd75 Iustin Pop
      self.op.snode = ial.nodes[1]
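    # For an opcode with disk_size=10240, swap_size=2048, mac="auto" and
    # bridge="xen-br0" (example values), the request built above amounts to:
    #
    #   disks = [{"size": 10240, "mode": "w"}, {"size": 2048, "mode": "w"}]
    #   nics  = [{"mac": "auto", "ip": None, "bridge": "xen-br0"}]
    #
    # and a successful run fills in self.op.pnode (plus self.op.snode when
    # the chosen disk template needs two nodes) from ial.nodes.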
3242 538475ca Iustin Pop
3243 a8083063 Iustin Pop
  def BuildHooksEnv(self):
3244 a8083063 Iustin Pop
    """Build hooks env.
3245 a8083063 Iustin Pop

3246 a8083063 Iustin Pop
    This runs on master, primary and secondary nodes of the instance.
3247 a8083063 Iustin Pop

3248 a8083063 Iustin Pop
    """
3249 a8083063 Iustin Pop
    env = {
3250 396e1b78 Michael Hanselmann
      "INSTANCE_DISK_TEMPLATE": self.op.disk_template,
3251 396e1b78 Michael Hanselmann
      "INSTANCE_DISK_SIZE": self.op.disk_size,
3252 396e1b78 Michael Hanselmann
      "INSTANCE_SWAP_SIZE": self.op.swap_size,
3253 a8083063 Iustin Pop
      "INSTANCE_ADD_MODE": self.op.mode,
3254 a8083063 Iustin Pop
      }
3255 a8083063 Iustin Pop
    if self.op.mode == constants.INSTANCE_IMPORT:
3256 396e1b78 Michael Hanselmann
      env["INSTANCE_SRC_NODE"] = self.op.src_node
3257 396e1b78 Michael Hanselmann
      env["INSTANCE_SRC_PATH"] = self.op.src_path
3258 396e1b78 Michael Hanselmann
      env["INSTANCE_SRC_IMAGE"] = self.src_image
3259 396e1b78 Michael Hanselmann
3260 396e1b78 Michael Hanselmann
    env.update(_BuildInstanceHookEnv(name=self.op.instance_name,
3261 396e1b78 Michael Hanselmann
      primary_node=self.op.pnode,
3262 396e1b78 Michael Hanselmann
      secondary_nodes=self.secondaries,
3263 396e1b78 Michael Hanselmann
      status=self.instance_status,
3264 ecb215b5 Michael Hanselmann
      os_type=self.op.os_type,
3265 396e1b78 Michael Hanselmann
      memory=self.op.mem_size,
3266 396e1b78 Michael Hanselmann
      vcpus=self.op.vcpus,
3267 c7b27e9e Iustin Pop
      nics=[(self.inst_ip, self.op.bridge, self.op.mac)],
3268 396e1b78 Michael Hanselmann
    ))
3269 a8083063 Iustin Pop
3270 880478f8 Iustin Pop
    nl = ([self.sstore.GetMasterNode(), self.op.pnode] +
3271 a8083063 Iustin Pop
          self.secondaries)
3272 a8083063 Iustin Pop
    return env, nl, nl
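    # In short: on top of the generic per-instance variables added by
    # _BuildInstanceHookEnv, creation hooks see the disk template, disk and
    # swap sizes and the add mode (plus source node/path/image for imports),
    # and they run on the master, the chosen primary and any secondaries.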
3273 a8083063 Iustin Pop
3274 a8083063 Iustin Pop
3275 a8083063 Iustin Pop
  def CheckPrereq(self):
3276 a8083063 Iustin Pop
    """Check prerequisites.
3277 a8083063 Iustin Pop

3278 a8083063 Iustin Pop
    """
3279 538475ca Iustin Pop
    # set optional parameters to none if they don't exist
3280 538475ca Iustin Pop
    for attr in ["kernel_path", "initrd_path", "hvm_boot_order", "pnode",
3281 538475ca Iustin Pop
                 "iallocator"]:
3282 40ed12dd Guido Trotter
      if not hasattr(self.op, attr):
3283 40ed12dd Guido Trotter
        setattr(self.op, attr, None)
3284 40ed12dd Guido Trotter
3285 a8083063 Iustin Pop
    if self.op.mode not in (constants.INSTANCE_CREATE,
3286 a8083063 Iustin Pop
                            constants.INSTANCE_IMPORT):
3287 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("Invalid instance creation mode '%s'" %
3288 3ecf6786 Iustin Pop
                                 self.op.mode)
3289 a8083063 Iustin Pop
3290 eedc99de Manuel Franceschini
    if (not self.cfg.GetVGName() and
3291 eedc99de Manuel Franceschini
        self.op.disk_template not in constants.DTS_NOT_LVM):
3292 eedc99de Manuel Franceschini
      raise errors.OpPrereqError("Cluster does not support lvm-based"
3293 eedc99de Manuel Franceschini
                                 " instances")
3294 eedc99de Manuel Franceschini
3295 a8083063 Iustin Pop
    if self.op.mode == constants.INSTANCE_IMPORT:
3296 a8083063 Iustin Pop
      src_node = getattr(self.op, "src_node", None)
3297 a8083063 Iustin Pop
      src_path = getattr(self.op, "src_path", None)
3298 a8083063 Iustin Pop
      if src_node is None or src_path is None:
3299 3ecf6786 Iustin Pop
        raise errors.OpPrereqError("Importing an instance requires source"
3300 3ecf6786 Iustin Pop
                                   " node and path options")
3301 a8083063 Iustin Pop
      src_node_full = self.cfg.ExpandNodeName(src_node)
3302 a8083063 Iustin Pop
      if src_node_full is None:
3303 3ecf6786 Iustin Pop
        raise errors.OpPrereqError("Unknown source node '%s'" % src_node)
3304 a8083063 Iustin Pop
      self.op.src_node = src_node = src_node_full
3305 a8083063 Iustin Pop
3306 a8083063 Iustin Pop
      if not os.path.isabs(src_path):
3307 3ecf6786 Iustin Pop
        raise errors.OpPrereqError("The source path must be absolute")
3308 a8083063 Iustin Pop
3309 a8083063 Iustin Pop
      export_info = rpc.call_export_info(src_node, src_path)
3310 a8083063 Iustin Pop
3311 a8083063 Iustin Pop
      if not export_info:
3312 3ecf6786 Iustin Pop
        raise errors.OpPrereqError("No export found in dir %s" % src_path)
3313 a8083063 Iustin Pop
3314 a8083063 Iustin Pop
      if not export_info.has_section(constants.INISECT_EXP):
3315 3ecf6786 Iustin Pop
        raise errors.ProgrammerError("Corrupted export config")
3316 a8083063 Iustin Pop
3317 a8083063 Iustin Pop
      ei_version = export_info.get(constants.INISECT_EXP, 'version')
3318 a8083063 Iustin Pop
      if (int(ei_version) != constants.EXPORT_VERSION):
3319 3ecf6786 Iustin Pop
        raise errors.OpPrereqError("Wrong export version %s (wanted %d)" %
3320 3ecf6786 Iustin Pop
                                   (ei_version, constants.EXPORT_VERSION))
3321 a8083063 Iustin Pop
3322 a8083063 Iustin Pop
      if int(export_info.get(constants.INISECT_INS, 'disk_count')) > 1:
3323 3ecf6786 Iustin Pop
        raise errors.OpPrereqError("Can't import instance with more than"
3324 3ecf6786 Iustin Pop
                                   " one data disk")
3325 a8083063 Iustin Pop
3326 a8083063 Iustin Pop
      # FIXME: are the old os-es, disk sizes, etc. useful?
3327 a8083063 Iustin Pop
      self.op.os_type = export_info.get(constants.INISECT_EXP, 'os')
3328 a8083063 Iustin Pop
      diskimage = os.path.join(src_path, export_info.get(constants.INISECT_INS,
3329 a8083063 Iustin Pop
                                                         'disk0_dump'))
3330 a8083063 Iustin Pop
      self.src_image = diskimage
3331 a8083063 Iustin Pop
    else: # INSTANCE_CREATE
3332 a8083063 Iustin Pop
      if getattr(self.op, "os_type", None) is None:
3333 3ecf6786 Iustin Pop
        raise errors.OpPrereqError("No guest OS specified")
3334 a8083063 Iustin Pop
3335 901a65c1 Iustin Pop
    #### instance parameters check
3336 901a65c1 Iustin Pop
3337 a8083063 Iustin Pop
    # disk template and mirror node verification
3338 a8083063 Iustin Pop
    if self.op.disk_template not in constants.DISK_TEMPLATES:
3339 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("Invalid disk template name")
3340 a8083063 Iustin Pop
3341 901a65c1 Iustin Pop
    # instance name verification
3342 901a65c1 Iustin Pop
    hostname1 = utils.HostInfo(self.op.instance_name)
3343 901a65c1 Iustin Pop
3344 901a65c1 Iustin Pop
    self.op.instance_name = instance_name = hostname1.name
3345 901a65c1 Iustin Pop
    instance_list = self.cfg.GetInstanceList()
3346 901a65c1 Iustin Pop
    if instance_name in instance_list:
3347 901a65c1 Iustin Pop
      raise errors.OpPrereqError("Instance '%s' is already in the cluster" %
3348 901a65c1 Iustin Pop
                                 instance_name)
3349 901a65c1 Iustin Pop
3350 901a65c1 Iustin Pop
    # ip validity checks
3351 901a65c1 Iustin Pop
    ip = getattr(self.op, "ip", None)
3352 901a65c1 Iustin Pop
    if ip is None or ip.lower() == "none":
3353 901a65c1 Iustin Pop
      inst_ip = None
3354 901a65c1 Iustin Pop
    elif ip.lower() == "auto":
3355 901a65c1 Iustin Pop
      inst_ip = hostname1.ip
3356 901a65c1 Iustin Pop
    else:
3357 901a65c1 Iustin Pop
      if not utils.IsValidIP(ip):
3358 901a65c1 Iustin Pop
        raise errors.OpPrereqError("given IP address '%s' doesn't look"
3359 901a65c1 Iustin Pop
                                   " like a valid IP" % ip)
3360 901a65c1 Iustin Pop
      inst_ip = ip
3361 901a65c1 Iustin Pop
    self.inst_ip = self.op.ip = inst_ip
3362 901a65c1 Iustin Pop
3363 901a65c1 Iustin Pop
    if self.op.start and not self.op.ip_check:
3364 901a65c1 Iustin Pop
      raise errors.OpPrereqError("Cannot ignore IP address conflicts when"
3365 901a65c1 Iustin Pop
                                 " adding an instance in start mode")
3366 901a65c1 Iustin Pop
3367 901a65c1 Iustin Pop
    if self.op.ip_check:
3368 901a65c1 Iustin Pop
      if utils.TcpPing(hostname1.ip, constants.DEFAULT_NODED_PORT):
3369 901a65c1 Iustin Pop
        raise errors.OpPrereqError("IP %s of instance %s already in use" %
3370 901a65c1 Iustin Pop
                                   (hostname1.ip, instance_name))
3371 901a65c1 Iustin Pop
3372 901a65c1 Iustin Pop
    # MAC address verification
3373 901a65c1 Iustin Pop
    if self.op.mac != "auto":
3374 901a65c1 Iustin Pop
      if not utils.IsValidMac(self.op.mac.lower()):
3375 901a65c1 Iustin Pop
        raise errors.OpPrereqError("invalid MAC address specified: %s" %
3376 901a65c1 Iustin Pop
                                   self.op.mac)
3377 901a65c1 Iustin Pop
3378 901a65c1 Iustin Pop
    # bridge verification
3379 901a65c1 Iustin Pop
    bridge = getattr(self.op, "bridge", None)
3380 901a65c1 Iustin Pop
    if bridge is None:
3381 901a65c1 Iustin Pop
      self.op.bridge = self.cfg.GetDefBridge()
3382 901a65c1 Iustin Pop
    else:
3383 901a65c1 Iustin Pop
      self.op.bridge = bridge
3384 901a65c1 Iustin Pop
3385 901a65c1 Iustin Pop
    # boot order verification
3386 901a65c1 Iustin Pop
    if self.op.hvm_boot_order is not None:
3387 901a65c1 Iustin Pop
      if len(self.op.hvm_boot_order.strip("acdn")) != 0:
3388 901a65c1 Iustin Pop
        raise errors.OpPrereqError("invalid boot order specified,"
3389 901a65c1 Iustin Pop
                                   " must be one or more of [acdn]")
3390 901a65c1 Iustin Pop
    # file storage checks
3391 0f1a06e3 Manuel Franceschini
    if (self.op.file_driver and
3392 0f1a06e3 Manuel Franceschini
        not self.op.file_driver in constants.FILE_DRIVER):
3393 0f1a06e3 Manuel Franceschini
      raise errors.OpPrereqError("Invalid file driver name '%s'" %
3394 0f1a06e3 Manuel Franceschini
                                 self.op.file_driver)
3395 0f1a06e3 Manuel Franceschini
3396 0f1a06e3 Manuel Franceschini
    if self.op.file_storage_dir and os.path.isabs(self.op.file_storage_dir):
3397 b4de68a9 Iustin Pop
      raise errors.OpPrereqError("File storage directory not a relative"
3398 b4de68a9 Iustin Pop
                                 " path")
3399 538475ca Iustin Pop
    #### allocator run
3400 538475ca Iustin Pop
3401 538475ca Iustin Pop
    if [self.op.iallocator, self.op.pnode].count(None) != 1:
3402 538475ca Iustin Pop
      raise errors.OpPrereqError("One and only one of iallocator and primary"
3403 538475ca Iustin Pop
                                 " node must be given")
3404 538475ca Iustin Pop
3405 538475ca Iustin Pop
    if self.op.iallocator is not None:
3406 538475ca Iustin Pop
      self._RunAllocator()
3407 0f1a06e3 Manuel Franceschini
3408 901a65c1 Iustin Pop
    #### node related checks
3409 901a65c1 Iustin Pop
3410 901a65c1 Iustin Pop
    # check primary node
3411 901a65c1 Iustin Pop
    pnode = self.cfg.GetNodeInfo(self.cfg.ExpandNodeName(self.op.pnode))
3412 901a65c1 Iustin Pop
    if pnode is None:
3413 901a65c1 Iustin Pop
      raise errors.OpPrereqError("Primary node '%s' is unknown" %
3414 901a65c1 Iustin Pop
                                 self.op.pnode)
3415 901a65c1 Iustin Pop
    self.op.pnode = pnode.name
3416 901a65c1 Iustin Pop
    self.pnode = pnode
3417 901a65c1 Iustin Pop
    self.secondaries = []
3418 901a65c1 Iustin Pop
3419 901a65c1 Iustin Pop
    # mirror node verification
3420 a1f445d3 Iustin Pop
    if self.op.disk_template in constants.DTS_NET_MIRROR:
3421 a8083063 Iustin Pop
      if getattr(self.op, "snode", None) is None:
3422 a1f445d3 Iustin Pop
        raise errors.OpPrereqError("The networked disk templates need"
3423 3ecf6786 Iustin Pop
                                   " a mirror node")
3424 a8083063 Iustin Pop
3425 a8083063 Iustin Pop
      snode_name = self.cfg.ExpandNodeName(self.op.snode)
3426 a8083063 Iustin Pop
      if snode_name is None:
3427 3ecf6786 Iustin Pop
        raise errors.OpPrereqError("Unknown secondary node '%s'" %
3428 3ecf6786 Iustin Pop
                                   self.op.snode)
3429 a8083063 Iustin Pop
      elif snode_name == pnode.name:
3430 3ecf6786 Iustin Pop
        raise errors.OpPrereqError("The secondary node cannot be"
3431 3ecf6786 Iustin Pop
                                   " the primary node.")
3432 a8083063 Iustin Pop
      self.secondaries.append(snode_name)
3433 a8083063 Iustin Pop
3434 e2fe6369 Iustin Pop
    req_size = _ComputeDiskSize(self.op.disk_template,
3435 e2fe6369 Iustin Pop
                                self.op.disk_size, self.op.swap_size)
3436 ed1ebc60 Guido Trotter
3437 8d75db10 Iustin Pop
    # Check lv size requirements
3438 8d75db10 Iustin Pop
    if req_size is not None:
3439 8d75db10 Iustin Pop
      nodenames = [pnode.name] + self.secondaries
3440 8d75db10 Iustin Pop
      nodeinfo = rpc.call_node_info(nodenames, self.cfg.GetVGName())
3441 8d75db10 Iustin Pop
      for node in nodenames:
3442 8d75db10 Iustin Pop
        info = nodeinfo.get(node, None)
3443 8d75db10 Iustin Pop
        if not info:
3444 8d75db10 Iustin Pop
          raise errors.OpPrereqError("Cannot get current information"
3445 8d75db10 Iustin Pop
                                     " from node '%s'" % nodeinfo)
3446 8d75db10 Iustin Pop
        vg_free = info.get('vg_free', None)
3447 8d75db10 Iustin Pop
        if not isinstance(vg_free, int):
3448 8d75db10 Iustin Pop
          raise errors.OpPrereqError("Can't compute free disk space on"
3449 8d75db10 Iustin Pop
                                     " node %s" % node)
3450 8d75db10 Iustin Pop
        if req_size > info['vg_free']:
3451 8d75db10 Iustin Pop
          raise errors.OpPrereqError("Not enough disk space on target node %s."
3452 8d75db10 Iustin Pop
                                     " %d MB available, %d MB required" %
3453 8d75db10 Iustin Pop
                                     (node, info['vg_free'], req_size))
3454 ed1ebc60 Guido Trotter
3455 a8083063 Iustin Pop
    # os verification
3456 00fe9e38 Guido Trotter
    os_obj = rpc.call_os_get(pnode.name, self.op.os_type)
3457 dfa96ded Guido Trotter
    if not os_obj:
3458 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("OS '%s' not in supported os list for"
3459 3ecf6786 Iustin Pop
                                 " primary node"  % self.op.os_type)
3460 a8083063 Iustin Pop
3461 3b6d8c9b Iustin Pop
    if self.op.kernel_path == constants.VALUE_NONE:
3462 3b6d8c9b Iustin Pop
      raise errors.OpPrereqError("Can't set instance kernel to none")
3463 3b6d8c9b Iustin Pop
3464 a8083063 Iustin Pop
3465 901a65c1 Iustin Pop
    # bridge check on primary node
3466 a8083063 Iustin Pop
    if not rpc.call_bridges_exist(self.pnode.name, [self.op.bridge]):
3467 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("target bridge '%s' does not exist on"
3468 3ecf6786 Iustin Pop
                                 " destination node '%s'" %
3469 3ecf6786 Iustin Pop
                                 (self.op.bridge, pnode.name))
3470 a8083063 Iustin Pop
3471 a8083063 Iustin Pop
    if self.op.start:
3472 a8083063 Iustin Pop
      self.instance_status = 'up'
3473 a8083063 Iustin Pop
    else:
3474 a8083063 Iustin Pop
      self.instance_status = 'down'
3475 a8083063 Iustin Pop
3476 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
3477 a8083063 Iustin Pop
    """Create and add the instance to the cluster.
3478 a8083063 Iustin Pop

3479 a8083063 Iustin Pop
    """
3480 a8083063 Iustin Pop
    instance = self.op.instance_name
3481 a8083063 Iustin Pop
    pnode_name = self.pnode.name
3482 a8083063 Iustin Pop
3483 1862d460 Alexander Schreiber
    if self.op.mac == "auto":
3484 ba4b62cf Iustin Pop
      mac_address = self.cfg.GenerateMAC()
3485 1862d460 Alexander Schreiber
    else:
3486 ba4b62cf Iustin Pop
      mac_address = self.op.mac
3487 1862d460 Alexander Schreiber
3488 1862d460 Alexander Schreiber
    nic = objects.NIC(bridge=self.op.bridge, mac=mac_address)
3489 a8083063 Iustin Pop
    if self.inst_ip is not None:
3490 a8083063 Iustin Pop
      nic.ip = self.inst_ip
3491 a8083063 Iustin Pop
3492 2a6469d5 Alexander Schreiber
    ht_kind = self.sstore.GetHypervisorType()
3493 2a6469d5 Alexander Schreiber
    if ht_kind in constants.HTS_REQ_PORT:
3494 2a6469d5 Alexander Schreiber
      network_port = self.cfg.AllocatePort()
3495 2a6469d5 Alexander Schreiber
    else:
3496 2a6469d5 Alexander Schreiber
      network_port = None
3497 58acb49d Alexander Schreiber
3498 2c313123 Manuel Franceschini
    # this is needed because os.path.join does not accept None arguments
3499 2c313123 Manuel Franceschini
    if self.op.file_storage_dir is None:
3500 2c313123 Manuel Franceschini
      string_file_storage_dir = ""
3501 2c313123 Manuel Franceschini
    else:
3502 2c313123 Manuel Franceschini
      string_file_storage_dir = self.op.file_storage_dir
3503 2c313123 Manuel Franceschini
3504 0f1a06e3 Manuel Franceschini
    # build the full file storage dir path
3505 0f1a06e3 Manuel Franceschini
    file_storage_dir = os.path.normpath(os.path.join(
3506 0f1a06e3 Manuel Franceschini
                                        self.sstore.GetFileStorageDir(),
3507 2c313123 Manuel Franceschini
                                        string_file_storage_dir, instance))
3508 0f1a06e3 Manuel Franceschini
3509 0f1a06e3 Manuel Franceschini
3510 923b1523 Iustin Pop
    disks = _GenerateDiskTemplate(self.cfg,
3511 a8083063 Iustin Pop
                                  self.op.disk_template,
3512 a8083063 Iustin Pop
                                  instance, pnode_name,
3513 a8083063 Iustin Pop
                                  self.secondaries, self.op.disk_size,
3514 0f1a06e3 Manuel Franceschini
                                  self.op.swap_size,
3515 0f1a06e3 Manuel Franceschini
                                  file_storage_dir,
3516 0f1a06e3 Manuel Franceschini
                                  self.op.file_driver)
3517 a8083063 Iustin Pop
3518 a8083063 Iustin Pop
    iobj = objects.Instance(name=instance, os=self.op.os_type,
3519 a8083063 Iustin Pop
                            primary_node=pnode_name,
3520 a8083063 Iustin Pop
                            memory=self.op.mem_size,
3521 a8083063 Iustin Pop
                            vcpus=self.op.vcpus,
3522 a8083063 Iustin Pop
                            nics=[nic], disks=disks,
3523 a8083063 Iustin Pop
                            disk_template=self.op.disk_template,
3524 a8083063 Iustin Pop
                            status=self.instance_status,
3525 58acb49d Alexander Schreiber
                            network_port=network_port,
3526 3b6d8c9b Iustin Pop
                            kernel_path=self.op.kernel_path,
3527 3b6d8c9b Iustin Pop
                            initrd_path=self.op.initrd_path,
3528 25c5878d Alexander Schreiber
                            hvm_boot_order=self.op.hvm_boot_order,
3529 a8083063 Iustin Pop
                            )
3530 a8083063 Iustin Pop
3531 a8083063 Iustin Pop
    feedback_fn("* creating instance disks...")
3532 a8083063 Iustin Pop
    if not _CreateDisks(self.cfg, iobj):
3533 a8083063 Iustin Pop
      _RemoveDisks(iobj, self.cfg)
3534 3ecf6786 Iustin Pop
      raise errors.OpExecError("Device creation failed, reverting...")
3535 a8083063 Iustin Pop
3536 a8083063 Iustin Pop
    feedback_fn("adding instance %s to cluster config" % instance)
3537 a8083063 Iustin Pop
3538 a8083063 Iustin Pop
    self.cfg.AddInstance(iobj)
3539 a8083063 Iustin Pop
3540 a8083063 Iustin Pop
    if self.op.wait_for_sync:
3541 5bfac263 Iustin Pop
      disk_abort = not _WaitForSync(self.cfg, iobj, self.proc)
3542 a1f445d3 Iustin Pop
    elif iobj.disk_template in constants.DTS_NET_MIRROR:
3543 a8083063 Iustin Pop
      # make sure the disks are not degraded (still sync-ing is ok)
3544 a8083063 Iustin Pop
      time.sleep(15)
3545 a8083063 Iustin Pop
      feedback_fn("* checking mirrors status")
3546 5bfac263 Iustin Pop
      disk_abort = not _WaitForSync(self.cfg, iobj, self.proc, oneshot=True)
3547 a8083063 Iustin Pop
    else:
3548 a8083063 Iustin Pop
      disk_abort = False
3549 a8083063 Iustin Pop
3550 a8083063 Iustin Pop
    if disk_abort:
3551 a8083063 Iustin Pop
      _RemoveDisks(iobj, self.cfg)
3552 a8083063 Iustin Pop
      self.cfg.RemoveInstance(iobj.name)
3553 3ecf6786 Iustin Pop
      raise errors.OpExecError("There are some degraded disks for"
3554 3ecf6786 Iustin Pop
                               " this instance")
3555 a8083063 Iustin Pop
3556 a8083063 Iustin Pop
    feedback_fn("creating os for instance %s on node %s" %
3557 a8083063 Iustin Pop
                (instance, pnode_name))
3558 a8083063 Iustin Pop
3559 a8083063 Iustin Pop
    if iobj.disk_template != constants.DT_DISKLESS:
3560 a8083063 Iustin Pop
      if self.op.mode == constants.INSTANCE_CREATE:
3561 a8083063 Iustin Pop
        feedback_fn("* running the instance OS create scripts...")
3562 a8083063 Iustin Pop
        if not rpc.call_instance_os_add(pnode_name, iobj, "sda", "sdb"):
3563 3ecf6786 Iustin Pop
          raise errors.OpExecError("could not add os for instance %s"
3564 3ecf6786 Iustin Pop
                                   " on node %s" %
3565 3ecf6786 Iustin Pop
                                   (instance, pnode_name))
3566 a8083063 Iustin Pop
3567 a8083063 Iustin Pop
      elif self.op.mode == constants.INSTANCE_IMPORT:
3568 a8083063 Iustin Pop
        feedback_fn("* running the instance OS import scripts...")
3569 a8083063 Iustin Pop
        src_node = self.op.src_node
3570 a8083063 Iustin Pop
        src_image = self.src_image
3571 a8083063 Iustin Pop
        if not rpc.call_instance_os_import(pnode_name, iobj, "sda", "sdb",
3572 a8083063 Iustin Pop
                                                src_node, src_image):
3573 3ecf6786 Iustin Pop
          raise errors.OpExecError("Could not import os for instance"
3574 3ecf6786 Iustin Pop
                                   " %s on node %s" %
3575 3ecf6786 Iustin Pop
                                   (instance, pnode_name))
3576 a8083063 Iustin Pop
      else:
3577 a8083063 Iustin Pop
        # also checked in the prereq part
3578 3ecf6786 Iustin Pop
        raise errors.ProgrammerError("Unknown OS initialization mode '%s'"
3579 3ecf6786 Iustin Pop
                                     % self.op.mode)
3580 a8083063 Iustin Pop
3581 a8083063 Iustin Pop
    if self.op.start:
3582 a8083063 Iustin Pop
      logger.Info("starting instance %s on node %s" % (instance, pnode_name))
3583 a8083063 Iustin Pop
      feedback_fn("* starting instance...")
3584 a8083063 Iustin Pop
      if not rpc.call_instance_start(pnode_name, iobj, None):
3585 3ecf6786 Iustin Pop
        raise errors.OpExecError("Could not start instance")
3586 a8083063 Iustin Pop
3587 a8083063 Iustin Pop
3588 a8083063 Iustin Pop
class LUConnectConsole(NoHooksLU):
3589 a8083063 Iustin Pop
  """Connect to an instance's console.
3590 a8083063 Iustin Pop

3591 a8083063 Iustin Pop
  This is somewhat special in that it returns the command line that
3592 a8083063 Iustin Pop
  you need to run on the master node in order to connect to the
3593 a8083063 Iustin Pop
  console.
3594 a8083063 Iustin Pop

3595 a8083063 Iustin Pop
  """
3596 a8083063 Iustin Pop
  _OP_REQP = ["instance_name"]
3597 a8083063 Iustin Pop
3598 a8083063 Iustin Pop
  def CheckPrereq(self):
3599 a8083063 Iustin Pop
    """Check prerequisites.
3600 a8083063 Iustin Pop

3601 a8083063 Iustin Pop
    This checks that the instance is in the cluster.
3602 a8083063 Iustin Pop

3603 a8083063 Iustin Pop
    """
3604 a8083063 Iustin Pop
    instance = self.cfg.GetInstanceInfo(
3605 a8083063 Iustin Pop
      self.cfg.ExpandInstanceName(self.op.instance_name))
3606 a8083063 Iustin Pop
    if instance is None:
3607 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("Instance '%s' not known" %
3608 3ecf6786 Iustin Pop
                                 self.op.instance_name)
3609 a8083063 Iustin Pop
    self.instance = instance
3610 a8083063 Iustin Pop
3611 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
3612 a8083063 Iustin Pop
    """Connect to the console of an instance
3613 a8083063 Iustin Pop

3614 a8083063 Iustin Pop
    """
3615 a8083063 Iustin Pop
    instance = self.instance
3616 a8083063 Iustin Pop
    node = instance.primary_node
3617 a8083063 Iustin Pop
3618 a8083063 Iustin Pop
    node_insts = rpc.call_instance_list([node])[node]
3619 a8083063 Iustin Pop
    if node_insts is False:
3620 3ecf6786 Iustin Pop
      raise errors.OpExecError("Can't connect to node %s." % node)
3621 a8083063 Iustin Pop
3622 a8083063 Iustin Pop
    if instance.name not in node_insts:
3623 3ecf6786 Iustin Pop
      raise errors.OpExecError("Instance %s is not running." % instance.name)
3624 a8083063 Iustin Pop
3625 a8083063 Iustin Pop
    logger.Debug("connecting to console of %s on %s" % (instance.name, node))
3626 a8083063 Iustin Pop
3627 a8083063 Iustin Pop
    hyper = hypervisor.GetHypervisor()
3628 30989e69 Alexander Schreiber
    console_cmd = hyper.GetShellCommandForConsole(instance)
3629 b047857b Michael Hanselmann
3630 82122173 Iustin Pop
    # build ssh cmdline
3631 0a80a26f Michael Hanselmann
    return self.ssh.BuildCmd(node, "root", console_cmd, batch=True, tty=True)
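  # The command line returned here is not executed by the master itself; the
  # caller (typically "gnt-instance console") is expected to exec it, which
  # yields an SSH session from the master to the primary node running the
  # hypervisor's console command for the instance.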
3632 a8083063 Iustin Pop
3633 a8083063 Iustin Pop
3634 a8083063 Iustin Pop
class LUReplaceDisks(LogicalUnit):
3635 a8083063 Iustin Pop
  """Replace the disks of an instance.
3636 a8083063 Iustin Pop

3637 a8083063 Iustin Pop
  """
3638 a8083063 Iustin Pop
  HPATH = "mirrors-replace"
3639 a8083063 Iustin Pop
  HTYPE = constants.HTYPE_INSTANCE
3640 a9e0c397 Iustin Pop
  _OP_REQP = ["instance_name", "mode", "disks"]
3641 a8083063 Iustin Pop
3642 b6e82a65 Iustin Pop
  def _RunAllocator(self):
3643 b6e82a65 Iustin Pop
    """Compute a new secondary node using an IAllocator.
3644 b6e82a65 Iustin Pop

3645 b6e82a65 Iustin Pop
    """
3646 b6e82a65 Iustin Pop
    ial = IAllocator(self.cfg, self.sstore,
3647 b6e82a65 Iustin Pop
                     mode=constants.IALLOCATOR_MODE_RELOC,
3648 b6e82a65 Iustin Pop
                     name=self.op.instance_name,
3649 b6e82a65 Iustin Pop
                     relocate_from=[self.sec_node])
3650 b6e82a65 Iustin Pop
3651 b6e82a65 Iustin Pop
    ial.Run(self.op.iallocator)
3652 b6e82a65 Iustin Pop
3653 b6e82a65 Iustin Pop
    if not ial.success:
3654 b6e82a65 Iustin Pop
      raise errors.OpPrereqError("Can't compute nodes using"
3655 b6e82a65 Iustin Pop
                                 " iallocator '%s': %s" % (self.op.iallocator,
3656 b6e82a65 Iustin Pop
                                                           ial.info))
3657 b6e82a65 Iustin Pop
    if len(ial.nodes) != ial.required_nodes:
3658 b6e82a65 Iustin Pop
      raise errors.OpPrereqError("iallocator '%s' returned invalid number"
3659 b6e82a65 Iustin Pop
                                 " of nodes (%s), required %s" %
3660 b6e82a65 Iustin Pop
                                 (self.op.iallocator, len(ial.nodes),
                                  ial.required_nodes))
3661 b6e82a65 Iustin Pop
    self.op.remote_node = ial.nodes[0]
3662 b6e82a65 Iustin Pop
    logger.ToStdout("Selected new secondary for the instance: %s" %
3663 b6e82a65 Iustin Pop
                    self.op.remote_node)
3664 b6e82a65 Iustin Pop
3665 a8083063 Iustin Pop
  def BuildHooksEnv(self):
3666 a8083063 Iustin Pop
    """Build hooks env.
3667 a8083063 Iustin Pop

3668 a8083063 Iustin Pop
    This runs on the master, the primary and all the secondaries.
3669 a8083063 Iustin Pop

3670 a8083063 Iustin Pop
    """
3671 a8083063 Iustin Pop
    env = {
3672 a9e0c397 Iustin Pop
      "MODE": self.op.mode,
3673 a8083063 Iustin Pop
      "NEW_SECONDARY": self.op.remote_node,
3674 a8083063 Iustin Pop
      "OLD_SECONDARY": self.instance.secondary_nodes[0],
3675 a8083063 Iustin Pop
      }
3676 396e1b78 Michael Hanselmann
    env.update(_BuildInstanceHookEnvByObject(self.instance))
3677 0834c866 Iustin Pop
    nl = [
3678 0834c866 Iustin Pop
      self.sstore.GetMasterNode(),
3679 0834c866 Iustin Pop
      self.instance.primary_node,
3680 0834c866 Iustin Pop
      ]
3681 0834c866 Iustin Pop
    if self.op.remote_node is not None:
3682 0834c866 Iustin Pop
      nl.append(self.op.remote_node)
3683 a8083063 Iustin Pop
    return env, nl, nl
3684 a8083063 Iustin Pop
3685 a8083063 Iustin Pop
  def CheckPrereq(self):
3686 a8083063 Iustin Pop
    """Check prerequisites.
3687 a8083063 Iustin Pop

3688 a8083063 Iustin Pop
    This checks that the instance is in the cluster.
3689 a8083063 Iustin Pop

3690 a8083063 Iustin Pop
    """
3691 b6e82a65 Iustin Pop
    if not hasattr(self.op, "remote_node"):
3692 b6e82a65 Iustin Pop
      self.op.remote_node = None
3693 b6e82a65 Iustin Pop
3694 a8083063 Iustin Pop
    instance = self.cfg.GetInstanceInfo(
3695 a8083063 Iustin Pop
      self.cfg.ExpandInstanceName(self.op.instance_name))
3696 a8083063 Iustin Pop
    if instance is None:
3697 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("Instance '%s' not known" %
3698 3ecf6786 Iustin Pop
                                 self.op.instance_name)
3699 a8083063 Iustin Pop
    self.instance = instance
3700 7df43a76 Iustin Pop
    self.op.instance_name = instance.name
3701 a8083063 Iustin Pop
3702 a9e0c397 Iustin Pop
    if instance.disk_template not in constants.DTS_NET_MIRROR:
3703 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("Instance's disk layout is not"
3704 a9e0c397 Iustin Pop
                                 " network mirrored.")
3705 a8083063 Iustin Pop
3706 a8083063 Iustin Pop
    if len(instance.secondary_nodes) != 1:
3707 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("The instance has a strange layout,"
3708 3ecf6786 Iustin Pop
                                 " expected one secondary but found %d" %
3709 3ecf6786 Iustin Pop
                                 len(instance.secondary_nodes))
3710 a8083063 Iustin Pop
3711 a9e0c397 Iustin Pop
    self.sec_node = instance.secondary_nodes[0]
3712 a9e0c397 Iustin Pop
3713 b6e82a65 Iustin Pop
    ia_name = getattr(self.op, "iallocator", None)
3714 b6e82a65 Iustin Pop
    if ia_name is not None:
3715 b6e82a65 Iustin Pop
      if self.op.remote_node is not None:
3716 b6e82a65 Iustin Pop
        raise errors.OpPrereqError("Give either the iallocator or the new"
3717 b6e82a65 Iustin Pop
                                   " secondary, not both")
3718 b6e82a65 Iustin Pop
      self.op.remote_node = self._RunAllocator()
3719 b6e82a65 Iustin Pop
3720 b6e82a65 Iustin Pop
    remote_node = self.op.remote_node
3721 a9e0c397 Iustin Pop
    if remote_node is not None:
3722 a8083063 Iustin Pop
      remote_node = self.cfg.ExpandNodeName(remote_node)
3723 a8083063 Iustin Pop
      if remote_node is None:
3724 3ecf6786 Iustin Pop
        raise errors.OpPrereqError("Node '%s' not known" %
3725 3ecf6786 Iustin Pop
                                   self.op.remote_node)
3726 a9e0c397 Iustin Pop
      self.remote_node_info = self.cfg.GetNodeInfo(remote_node)
3727 a9e0c397 Iustin Pop
    else:
3728 a9e0c397 Iustin Pop
      self.remote_node_info = None
3729 a8083063 Iustin Pop
    if remote_node == instance.primary_node:
3730 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("The specified node is the primary node of"
3731 3ecf6786 Iustin Pop
                                 " the instance.")
3732 a9e0c397 Iustin Pop
    elif remote_node == self.sec_node:
3733 0834c866 Iustin Pop
      if self.op.mode == constants.REPLACE_DISK_SEC:
3734 0834c866 Iustin Pop
        # this is for DRBD8, where we can't execute the same mode of
3735 0834c866 Iustin Pop
        # replacement as for drbd7 (no different port allocated)
3736 0834c866 Iustin Pop
        raise errors.OpPrereqError("Same secondary given, cannot execute"
3737 0834c866 Iustin Pop
                                   " replacement")
3738 a9e0c397 Iustin Pop
      # the user gave the current secondary, switch to
3739 0834c866 Iustin Pop
      # 'no-replace-secondary' mode for drbd7
3740 a9e0c397 Iustin Pop
      remote_node = None
3741 a9e0c397 Iustin Pop
    if (instance.disk_template == constants.DT_REMOTE_RAID1 and
3742 a9e0c397 Iustin Pop
        self.op.mode != constants.REPLACE_DISK_ALL):
3743 a9e0c397 Iustin Pop
      raise errors.OpPrereqError("Template 'remote_raid1' only allows all"
3744 a9e0c397 Iustin Pop
                                 " disks replacement, not individual ones")
3745 a9e0c397 Iustin Pop
    if instance.disk_template == constants.DT_DRBD8:
3746 7df43a76 Iustin Pop
      if (self.op.mode == constants.REPLACE_DISK_ALL and
3747 7df43a76 Iustin Pop
          remote_node is not None):
3748 7df43a76 Iustin Pop
        # switch to replace secondary mode
3749 7df43a76 Iustin Pop
        self.op.mode = constants.REPLACE_DISK_SEC
3750 7df43a76 Iustin Pop
3751 a9e0c397 Iustin Pop
      if self.op.mode == constants.REPLACE_DISK_ALL:
3752 12c3449a Michael Hanselmann
        raise errors.OpPrereqError("Template 'drbd' only allows primary or"
3753 a9e0c397 Iustin Pop
                                   " secondary disk replacement, not"
3754 a9e0c397 Iustin Pop
                                   " both at once")
3755 a9e0c397 Iustin Pop
      elif self.op.mode == constants.REPLACE_DISK_PRI:
3756 a9e0c397 Iustin Pop
        if remote_node is not None:
3757 12c3449a Michael Hanselmann
          raise errors.OpPrereqError("Template 'drbd' does not allow changing"
3758 a9e0c397 Iustin Pop
                                     " the secondary while doing a primary"
3759 a9e0c397 Iustin Pop
                                     " node disk replacement")
3760 a9e0c397 Iustin Pop
        self.tgt_node = instance.primary_node
3761 cff90b79 Iustin Pop
        self.oth_node = instance.secondary_nodes[0]
3762 a9e0c397 Iustin Pop
      elif self.op.mode == constants.REPLACE_DISK_SEC:
3763 a9e0c397 Iustin Pop
        self.new_node = remote_node # this can be None, in which case
3764 a9e0c397 Iustin Pop
                                    # we don't change the secondary
3765 a9e0c397 Iustin Pop
        self.tgt_node = instance.secondary_nodes[0]
3766 cff90b79 Iustin Pop
        self.oth_node = instance.primary_node
3767 a9e0c397 Iustin Pop
      else:
3768 a9e0c397 Iustin Pop
        raise errors.ProgrammerError("Unhandled disk replace mode")
3769 a9e0c397 Iustin Pop
3770 a9e0c397 Iustin Pop
    for name in self.op.disks:
3771 a9e0c397 Iustin Pop
      if instance.FindDisk(name) is None:
3772 a9e0c397 Iustin Pop
        raise errors.OpPrereqError("Disk '%s' not found for instance '%s'" %
3773 a9e0c397 Iustin Pop
                                   (name, instance.name))
3774 a8083063 Iustin Pop
    self.op.remote_node = remote_node
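    # To summarize the drbd8 handling above: REPLACE_DISK_PRI rebuilds the
    # chosen disks on the primary (tgt_node=primary, oth_node=secondary),
    # REPLACE_DISK_SEC rebuilds them on the secondary (tgt_node=secondary,
    # oth_node=primary, with new_node set when the secondary itself is being
    # replaced), and asking for 'all' together with a new remote node is
    # turned into a secondary replacement onto that node.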
3775 a8083063 Iustin Pop
3776 a9e0c397 Iustin Pop
  def _ExecRR1(self, feedback_fn):
3777 a8083063 Iustin Pop
    """Replace the disks of an instance.
3778 a8083063 Iustin Pop

3779 a8083063 Iustin Pop
    """
3780 a8083063 Iustin Pop
    instance = self.instance
3781 a8083063 Iustin Pop
    iv_names = {}
3782 a8083063 Iustin Pop
    # start of work
3783 a9e0c397 Iustin Pop
    if self.op.remote_node is None:
3784 a9e0c397 Iustin Pop
      remote_node = self.sec_node
3785 a9e0c397 Iustin Pop
    else:
3786 a9e0c397 Iustin Pop
      remote_node = self.op.remote_node
3787 a8083063 Iustin Pop
    cfg = self.cfg
3788 a8083063 Iustin Pop
    for dev in instance.disks:
3789 a8083063 Iustin Pop
      size = dev.size
3790 923b1523 Iustin Pop
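      # each new mirror component needs two logical volumes (a data and a
      # metadata LV), so reserve a unique name for each of them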
      lv_names = [".%s_%s" % (dev.iv_name, suf) for suf in ["data", "meta"]]
3791 923b1523 Iustin Pop
      names = _GenerateUniqueNames(cfg, lv_names)
3792 923b1523 Iustin Pop
      new_drbd = _GenerateMDDRBDBranch(cfg, instance.primary_node,
3793 923b1523 Iustin Pop
                                       remote_node, size, names)
3794 a8083063 Iustin Pop
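      # remember the triple (md device, old mirror child, new drbd) so the
      # sync-check and cleanup loops below can look it up per disk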
      iv_names[dev.iv_name] = (dev, dev.children[0], new_drbd)
3795 a8083063 Iustin Pop
      logger.Info("adding new mirror component on secondary for %s" %
3796 a8083063 Iustin Pop
                  dev.iv_name)
3797 a8083063 Iustin Pop
      #HARDCODE
3798 3f78eef2 Iustin Pop
      if not _CreateBlockDevOnSecondary(cfg, remote_node, instance,
3799 3f78eef2 Iustin Pop
                                        new_drbd, False,
3800 a0c3fea1 Michael Hanselmann
                                        _GetInstanceInfoText(instance)):
3801 f4bc1f2c Michael Hanselmann
        raise errors.OpExecError("Failed to create new component on secondary"
3802 f4bc1f2c Michael Hanselmann
                                 " node %s. Full abort, cleanup manually!" %
3803 3ecf6786 Iustin Pop
                                 remote_node)
3804 a8083063 Iustin Pop
3805 a8083063 Iustin Pop
      logger.Info("adding new mirror component on primary")
3806 a8083063 Iustin Pop
      #HARDCODE
3807 3f78eef2 Iustin Pop
      if not _CreateBlockDevOnPrimary(cfg, instance.primary_node,
3808 3f78eef2 Iustin Pop
                                      instance, new_drbd,
3809 a0c3fea1 Michael Hanselmann
                                      _GetInstanceInfoText(instance)):
3810 a8083063 Iustin Pop
        # remove secondary dev
3811 a8083063 Iustin Pop
        cfg.SetDiskID(new_drbd, remote_node)
3812 a8083063 Iustin Pop
        rpc.call_blockdev_remove(remote_node, new_drbd)
3813 f4bc1f2c Michael Hanselmann
        raise errors.OpExecError("Failed to create volume on primary!"
3814 f4bc1f2c Michael Hanselmann
                                 " Full abort, cleanup manually!!")
3815 a8083063 Iustin Pop
3816 a8083063 Iustin Pop
      # the device exists now
3817 a8083063 Iustin Pop
      # call the primary node to add the mirror to md
3818 a8083063 Iustin Pop
      logger.Info("adding new mirror component to md")
3819 153d9724 Iustin Pop
      if not rpc.call_blockdev_addchildren(instance.primary_node, dev,
3820 153d9724 Iustin Pop
                                           [new_drbd]):
3821 a8083063 Iustin Pop
        logger.Error("Can't add mirror compoment to md!")
3822 a8083063 Iustin Pop
        cfg.SetDiskID(new_drbd, remote_node)
3823 a8083063 Iustin Pop
        if not rpc.call_blockdev_remove(remote_node, new_drbd):
3824 a8083063 Iustin Pop
          logger.Error("Can't rollback on secondary")
3825 a8083063 Iustin Pop
        cfg.SetDiskID(new_drbd, instance.primary_node)
3826 a8083063 Iustin Pop
        if not rpc.call_blockdev_remove(instance.primary_node, new_drbd):
3827 a8083063 Iustin Pop
          logger.Error("Can't rollback on primary")
3828 3ecf6786 Iustin Pop
        raise errors.OpExecError("Full abort, cleanup manually!!")
3829 a8083063 Iustin Pop
3830 a8083063 Iustin Pop
      dev.children.append(new_drbd)
3831 a8083063 Iustin Pop
      cfg.AddInstance(instance)
3832 a8083063 Iustin Pop
3833 a8083063 Iustin Pop
    # this can fail as the old devices are degraded and _WaitForSync
3834 a8083063 Iustin Pop
    # does a combined result over all disks, so we don't check its
3835 a8083063 Iustin Pop
    # return value
3836 5bfac263 Iustin Pop
    _WaitForSync(cfg, instance, self.proc, unlock=True)
3837 a8083063 Iustin Pop
3838 a8083063 Iustin Pop
    # so check manually all the devices
3839 a8083063 Iustin Pop
    for name in iv_names:
3840 a8083063 Iustin Pop
      dev, child, new_drbd = iv_names[name]
3841 a8083063 Iustin Pop
      cfg.SetDiskID(dev, instance.primary_node)
3842 a8083063 Iustin Pop
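      # blockdev_find returns a status tuple for the device; as used here,
      # field 5 is the 'degraded' flag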
      is_degr = rpc.call_blockdev_find(instance.primary_node, dev)[5]
3843 a8083063 Iustin Pop
      if is_degr:
3844 3ecf6786 Iustin Pop
        raise errors.OpExecError("MD device %s is degraded!" % name)
3845 a8083063 Iustin Pop
      cfg.SetDiskID(new_drbd, instance.primary_node)
3846 a8083063 Iustin Pop
      is_degr = rpc.call_blockdev_find(instance.primary_node, new_drbd)[5]
3847 a8083063 Iustin Pop
      if is_degr:
3848 3ecf6786 Iustin Pop
        raise errors.OpExecError("New drbd device %s is degraded!" % name)
3849 a8083063 Iustin Pop
3850 a8083063 Iustin Pop
    for name in iv_names:
3851 a8083063 Iustin Pop
      dev, child, new_drbd = iv_names[name]
3852 a8083063 Iustin Pop
      logger.Info("remove mirror %s component" % name)
3853 a8083063 Iustin Pop
      cfg.SetDiskID(dev, instance.primary_node)
3854 153d9724 Iustin Pop
      if not rpc.call_blockdev_removechildren(instance.primary_node,
3855 153d9724 Iustin Pop
                                              dev, [child]):
3856 a8083063 Iustin Pop
        logger.Error("Can't remove child from mirror, aborting"
3857 a8083063 Iustin Pop
                     " *this device cleanup*.\nYou need to cleanup manually!!")
3858 a8083063 Iustin Pop
        continue
3859 a8083063 Iustin Pop
3860 a8083063 Iustin Pop
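      # the first two entries of the old child's logical_id are the nodes
      # holding it, so remove the device from both of them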
      for node in child.logical_id[:2]:
3861 a8083063 Iustin Pop
        logger.Info("remove child device on %s" % node)
3862 a8083063 Iustin Pop
        cfg.SetDiskID(child, node)
3863 a8083063 Iustin Pop
        if not rpc.call_blockdev_remove(node, child):
3864 a8083063 Iustin Pop
          logger.Error("Warning: failed to remove device from node %s,"
3865 a8083063 Iustin Pop
                       " continuing operation." % node)
3866 a8083063 Iustin Pop
3867 a8083063 Iustin Pop
      dev.children.remove(child)
3868 a8083063 Iustin Pop
3869 a8083063 Iustin Pop
      cfg.AddInstance(instance)
3870 a8083063 Iustin Pop
3871 a9e0c397 Iustin Pop
  def _ExecD8DiskOnly(self, feedback_fn):
3872 a9e0c397 Iustin Pop
    """Replace a disk on the primary or secondary for dbrd8.
3873 a9e0c397 Iustin Pop

3874 a9e0c397 Iustin Pop
    The algorithm for replace is quite complicated:
3875 a9e0c397 Iustin Pop
      - for each disk to be replaced:
3876 a9e0c397 Iustin Pop
        - create new LVs on the target node with unique names
3877 a9e0c397 Iustin Pop
        - detach old LVs from the drbd device
3878 a9e0c397 Iustin Pop
        - rename old LVs to name_replaced.<time_t>
3879 a9e0c397 Iustin Pop
        - rename new LVs to old LVs
3880 a9e0c397 Iustin Pop
        - attach the new LVs (with the old names now) to the drbd device
3881 a9e0c397 Iustin Pop
      - wait for sync across all devices
3882 a9e0c397 Iustin Pop
      - for each modified disk:
3883 a9e0c397 Iustin Pop
        - remove old LVs (which have the name name_replaced.<time_t>)
3884 a9e0c397 Iustin Pop

3885 a9e0c397 Iustin Pop
    Failures are not very well handled.
3886 cff90b79 Iustin Pop

3887 a9e0c397 Iustin Pop
    """
3888 cff90b79 Iustin Pop
    steps_total = 6
3889 5bfac263 Iustin Pop
    warning, info = (self.proc.LogWarning, self.proc.LogInfo)
3890 a9e0c397 Iustin Pop
    instance = self.instance
3891 a9e0c397 Iustin Pop
    iv_names = {}
3892 a9e0c397 Iustin Pop
    vgname = self.cfg.GetVGName()
3893 a9e0c397 Iustin Pop
    # start of work
3894 a9e0c397 Iustin Pop
    cfg = self.cfg
3895 a9e0c397 Iustin Pop
    tgt_node = self.tgt_node
3896 cff90b79 Iustin Pop
    oth_node = self.oth_node
3897 cff90b79 Iustin Pop
3898 cff90b79 Iustin Pop
    # Step: check device activation
3899 5bfac263 Iustin Pop
    self.proc.LogStep(1, steps_total, "check device existence")
3900 cff90b79 Iustin Pop
    info("checking volume groups")
3901 cff90b79 Iustin Pop
    my_vg = cfg.GetVGName()
3902 cff90b79 Iustin Pop
    results = rpc.call_vg_list([oth_node, tgt_node])
3903 cff90b79 Iustin Pop
    if not results:
3904 cff90b79 Iustin Pop
      raise errors.OpExecError("Can't list volume groups on the nodes")
3905 cff90b79 Iustin Pop
    for node in oth_node, tgt_node:
3906 cff90b79 Iustin Pop
      res = results.get(node, False)
3907 cff90b79 Iustin Pop
      if not res or my_vg not in res:
3908 cff90b79 Iustin Pop
        raise errors.OpExecError("Volume group '%s' not found on %s" %
3909 cff90b79 Iustin Pop
                                 (my_vg, node))
3910 cff90b79 Iustin Pop
    for dev in instance.disks:
3911 cff90b79 Iustin Pop
      if dev.iv_name not in self.op.disks:
3912 cff90b79 Iustin Pop
        continue
3913 cff90b79 Iustin Pop
      for node in tgt_node, oth_node:
3914 cff90b79 Iustin Pop
        info("checking %s on %s" % (dev.iv_name, node))
3915 cff90b79 Iustin Pop
        cfg.SetDiskID(dev, node)
3916 cff90b79 Iustin Pop
        if not rpc.call_blockdev_find(node, dev):
3917 cff90b79 Iustin Pop
          raise errors.OpExecError("Can't find device %s on node %s" %
3918 cff90b79 Iustin Pop
                                   (dev.iv_name, node))
3919 cff90b79 Iustin Pop
3920 cff90b79 Iustin Pop
    # Step: check other node consistency
3921 5bfac263 Iustin Pop
    self.proc.LogStep(2, steps_total, "check peer consistency")
3922 cff90b79 Iustin Pop
    for dev in instance.disks:
3923 cff90b79 Iustin Pop
      if dev.iv_name not in self.op.disks:
3924 cff90b79 Iustin Pop
        continue
3925 cff90b79 Iustin Pop
      info("checking %s consistency on %s" % (dev.iv_name, oth_node))
3926 cff90b79 Iustin Pop
      if not _CheckDiskConsistency(self.cfg, dev, oth_node,
3927 cff90b79 Iustin Pop
                                   oth_node==instance.primary_node):
3928 cff90b79 Iustin Pop
        raise errors.OpExecError("Peer node (%s) has degraded storage, unsafe"
3929 cff90b79 Iustin Pop
                                 " to replace disks on this node (%s)" %
3930 cff90b79 Iustin Pop
                                 (oth_node, tgt_node))
3931 cff90b79 Iustin Pop
3932 cff90b79 Iustin Pop
    # Step: create new storage
3933 5bfac263 Iustin Pop
    self.proc.LogStep(3, steps_total, "allocate new storage")
3934 a9e0c397 Iustin Pop
    for dev in instance.disks:
3935 a9e0c397 Iustin Pop
      if dev.iv_name not in self.op.disks:
3936 a9e0c397 Iustin Pop
        continue
3937 a9e0c397 Iustin Pop
      size = dev.size
3938 a9e0c397 Iustin Pop
      cfg.SetDiskID(dev, tgt_node)
3939 a9e0c397 Iustin Pop
      lv_names = [".%s_%s" % (dev.iv_name, suf) for suf in ["data", "meta"]]
3940 a9e0c397 Iustin Pop
      names = _GenerateUniqueNames(cfg, lv_names)
3941 a9e0c397 Iustin Pop
      lv_data = objects.Disk(dev_type=constants.LD_LV, size=size,
3942 a9e0c397 Iustin Pop
                             logical_id=(vgname, names[0]))
3943 a9e0c397 Iustin Pop
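      # a drbd8 disk is backed by a data LV of the full disk size plus a
      # small fixed-size metadata LV (128 here)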
      lv_meta = objects.Disk(dev_type=constants.LD_LV, size=128,
3944 a9e0c397 Iustin Pop
                             logical_id=(vgname, names[1]))
3945 a9e0c397 Iustin Pop
      new_lvs = [lv_data, lv_meta]
3946 a9e0c397 Iustin Pop
      old_lvs = dev.children
3947 a9e0c397 Iustin Pop
      iv_names[dev.iv_name] = (dev, old_lvs, new_lvs)
3948 cff90b79 Iustin Pop
      info("creating new local storage on %s for %s" %
3949 cff90b79 Iustin Pop
           (tgt_node, dev.iv_name))
3950 a9e0c397 Iustin Pop
      # since we *always* want to create this LV, we use the
3951 a9e0c397 Iustin Pop
      # _Create...OnPrimary (which forces the creation), even if we
3952 a9e0c397 Iustin Pop
      # are talking about the secondary node
3953 a9e0c397 Iustin Pop
      for new_lv in new_lvs:
3954 3f78eef2 Iustin Pop
        if not _CreateBlockDevOnPrimary(cfg, tgt_node, instance, new_lv,
3955 a9e0c397 Iustin Pop
                                        _GetInstanceInfoText(instance)):
3956 a9e0c397 Iustin Pop
          raise errors.OpExecError("Failed to create new LV named '%s' on"
3957 a9e0c397 Iustin Pop
                                   " node '%s'" %
3958 a9e0c397 Iustin Pop
                                   (new_lv.logical_id[1], tgt_node))
3959 a9e0c397 Iustin Pop
3960 cff90b79 Iustin Pop
    # Step: for each lv, detach+rename*2+attach
3961 5bfac263 Iustin Pop
    self.proc.LogStep(4, steps_total, "change drbd configuration")
3962 cff90b79 Iustin Pop
    for dev, old_lvs, new_lvs in iv_names.itervalues():
3963 cff90b79 Iustin Pop
      info("detaching %s drbd from local storage" % dev.iv_name)
3964 a9e0c397 Iustin Pop
      if not rpc.call_blockdev_removechildren(tgt_node, dev, old_lvs):
3965 a9e0c397 Iustin Pop
        raise errors.OpExecError("Can't detach drbd from local storage on node"
3966 a9e0c397 Iustin Pop
                                 " %s for device %s" % (tgt_node, dev.iv_name))
3967 cff90b79 Iustin Pop
      #dev.children = []
3968 cff90b79 Iustin Pop
      #cfg.Update(instance)
3969 a9e0c397 Iustin Pop
3970 a9e0c397 Iustin Pop
      # ok, we created the new LVs, so now we know we have the needed
3971 a9e0c397 Iustin Pop
      # storage; as such, we proceed on the target node to rename
3972 a9e0c397 Iustin Pop
      # old_lv to _old, and new_lv to old_lv; note that we rename LVs
3973 c99a3cc0 Manuel Franceschini
      # using the assumption that logical_id == physical_id (which in
3974 a9e0c397 Iustin Pop
      # turn is the unique_id on that node)
3975 cff90b79 Iustin Pop
3976 cff90b79 Iustin Pop
      # FIXME(iustin): use a better name for the replaced LVs
3977 a9e0c397 Iustin Pop
      temp_suffix = int(time.time())
3978 a9e0c397 Iustin Pop
      ren_fn = lambda d, suff: (d.physical_id[0],
3979 a9e0c397 Iustin Pop
                                d.physical_id[1] + "_replaced-%s" % suff)
3980 cff90b79 Iustin Pop
      # build the rename list based on what LVs exist on the node
3981 cff90b79 Iustin Pop
      rlist = []
3982 cff90b79 Iustin Pop
      for to_ren in old_lvs:
3983 cff90b79 Iustin Pop
        find_res = rpc.call_blockdev_find(tgt_node, to_ren)
3984 cff90b79 Iustin Pop
        if find_res is not None: # device exists
3985 cff90b79 Iustin Pop
          rlist.append((to_ren, ren_fn(to_ren, temp_suffix)))
3986 cff90b79 Iustin Pop
3987 cff90b79 Iustin Pop
      info("renaming the old LVs on the target node")
3988 a9e0c397 Iustin Pop
      if not rpc.call_blockdev_rename(tgt_node, rlist):
3989 cff90b79 Iustin Pop
        raise errors.OpExecError("Can't rename old LVs on node %s" % tgt_node)
3990 a9e0c397 Iustin Pop
      # now we rename the new LVs to the old LVs
3991 cff90b79 Iustin Pop
      info("renaming the new LVs on the target node")
3992 a9e0c397 Iustin Pop
      rlist = [(new, old.physical_id) for old, new in zip(old_lvs, new_lvs)]
3993 a9e0c397 Iustin Pop
      if not rpc.call_blockdev_rename(tgt_node, rlist):
3994 cff90b79 Iustin Pop
        raise errors.OpExecError("Can't rename new LVs on node %s" % tgt_node)
3995 cff90b79 Iustin Pop
3996 cff90b79 Iustin Pop
      for old, new in zip(old_lvs, new_lvs):
3997 cff90b79 Iustin Pop
        new.logical_id = old.logical_id
3998 cff90b79 Iustin Pop
        cfg.SetDiskID(new, tgt_node)
3999 a9e0c397 Iustin Pop
4000 cff90b79 Iustin Pop
      for disk in old_lvs:
4001 cff90b79 Iustin Pop
        disk.logical_id = ren_fn(disk, temp_suffix)
4002 cff90b79 Iustin Pop
        cfg.SetDiskID(disk, tgt_node)
4003 a9e0c397 Iustin Pop
4004 a9e0c397 Iustin Pop
      # now that the new lvs have the old name, we can add them to the device
4005 cff90b79 Iustin Pop
      info("adding new mirror component on %s" % tgt_node)
4006 a9e0c397 Iustin Pop
      if not rpc.call_blockdev_addchildren(tgt_node, dev, new_lvs):
4007 a9e0c397 Iustin Pop
        for new_lv in new_lvs:
4008 a9e0c397 Iustin Pop
          if not rpc.call_blockdev_remove(tgt_node, new_lv):
4009 79caa9ed Guido Trotter
            warning("Can't rollback device %s", hint="manually cleanup unused"
4010 cff90b79 Iustin Pop
                    " logical volumes")
4011 cff90b79 Iustin Pop
        raise errors.OpExecError("Can't add local storage to drbd")
4012 a9e0c397 Iustin Pop
4013 a9e0c397 Iustin Pop
      dev.children = new_lvs
4014 a9e0c397 Iustin Pop
      cfg.Update(instance)
4015 a9e0c397 Iustin Pop
4016 cff90b79 Iustin Pop
    # Step: wait for sync
4017 a9e0c397 Iustin Pop
4018 a9e0c397 Iustin Pop
    # this can fail as the old devices are degraded and _WaitForSync
4019 a9e0c397 Iustin Pop
    # does a combined result over all disks, so we don't check its
4020 a9e0c397 Iustin Pop
    # return value
4021 5bfac263 Iustin Pop
    self.proc.LogStep(5, steps_total, "sync devices")
4022 5bfac263 Iustin Pop
    _WaitForSync(cfg, instance, self.proc, unlock=True)
4023 a9e0c397 Iustin Pop
4024 a9e0c397 Iustin Pop
    # so check manually all the devices
4025 a9e0c397 Iustin Pop
    for name, (dev, old_lvs, new_lvs) in iv_names.iteritems():
4026 a9e0c397 Iustin Pop
      cfg.SetDiskID(dev, instance.primary_node)
4027 a9e0c397 Iustin Pop
      is_degr = rpc.call_blockdev_find(instance.primary_node, dev)[5]
4028 a9e0c397 Iustin Pop
      if is_degr:
4029 a9e0c397 Iustin Pop
        raise errors.OpExecError("DRBD device %s is degraded!" % name)
4030 a9e0c397 Iustin Pop
4031 cff90b79 Iustin Pop
    # Step: remove old storage
4032 5bfac263 Iustin Pop
    self.proc.LogStep(6, steps_total, "removing old storage")
4033 a9e0c397 Iustin Pop
    for name, (dev, old_lvs, new_lvs) in iv_names.iteritems():
4034 cff90b79 Iustin Pop
      info("remove logical volumes for %s" % name)
4035 a9e0c397 Iustin Pop
      for lv in old_lvs:
4036 a9e0c397 Iustin Pop
        cfg.SetDiskID(lv, tgt_node)
4037 a9e0c397 Iustin Pop
        if not rpc.call_blockdev_remove(tgt_node, lv):
4038 79caa9ed Guido Trotter
          warning("Can't remove old LV", hint="manually remove unused LVs")
4039 a9e0c397 Iustin Pop
          continue
4040 a9e0c397 Iustin Pop
4041 a9e0c397 Iustin Pop
  def _ExecD8Secondary(self, feedback_fn):
4042 a9e0c397 Iustin Pop
    """Replace the secondary node for drbd8.
4043 a9e0c397 Iustin Pop

4044 a9e0c397 Iustin Pop
    The algorithm for replace is quite complicated:
4045 a9e0c397 Iustin Pop
      - for all disks of the instance:
4046 a9e0c397 Iustin Pop
        - create new LVs on the new node with same names
4047 a9e0c397 Iustin Pop
        - shut down the drbd device on the old secondary
4048 a9e0c397 Iustin Pop
        - disconnect the drbd network on the primary
4049 a9e0c397 Iustin Pop
        - create the drbd device on the new secondary
4050 a9e0c397 Iustin Pop
        - network attach the drbd on the primary, using an artifice:
4051 a9e0c397 Iustin Pop
          the drbd code for Attach() will connect to the network if it
4052 a9e0c397 Iustin Pop
          finds a device which is connected to the good local disks but
4053 a9e0c397 Iustin Pop
          not network enabled
4054 a9e0c397 Iustin Pop
      - wait for sync across all devices
4055 a9e0c397 Iustin Pop
      - remove all disks from the old secondary
4056 a9e0c397 Iustin Pop

4057 a9e0c397 Iustin Pop
    Failures are not very well handled.
4058 0834c866 Iustin Pop

4059 a9e0c397 Iustin Pop
    """
4060 0834c866 Iustin Pop
    steps_total = 6
4061 5bfac263 Iustin Pop
    warning, info = (self.proc.LogWarning, self.proc.LogInfo)
4062 a9e0c397 Iustin Pop
    instance = self.instance
4063 a9e0c397 Iustin Pop
    iv_names = {}
4064 a9e0c397 Iustin Pop
    vgname = self.cfg.GetVGName()
4065 a9e0c397 Iustin Pop
    # start of work
4066 a9e0c397 Iustin Pop
    cfg = self.cfg
4067 a9e0c397 Iustin Pop
    old_node = self.tgt_node
4068 a9e0c397 Iustin Pop
    new_node = self.new_node
4069 a9e0c397 Iustin Pop
    pri_node = instance.primary_node
4070 0834c866 Iustin Pop
4071 0834c866 Iustin Pop
    # Step: check device activation
4072 5bfac263 Iustin Pop
    self.proc.LogStep(1, steps_total, "check device existence")
4073 0834c866 Iustin Pop
    info("checking volume groups")
4074 0834c866 Iustin Pop
    my_vg = cfg.GetVGName()
4075 0834c866 Iustin Pop
    results = rpc.call_vg_list([pri_node, new_node])
4076 0834c866 Iustin Pop
    if not results:
4077 0834c866 Iustin Pop
      raise errors.OpExecError("Can't list volume groups on the nodes")
4078 0834c866 Iustin Pop
    for node in pri_node, new_node:
4079 0834c866 Iustin Pop
      res = results.get(node, False)
4080 0834c866 Iustin Pop
      if not res or my_vg not in res:
4081 0834c866 Iustin Pop
        raise errors.OpExecError("Volume group '%s' not found on %s" %
4082 0834c866 Iustin Pop
                                 (my_vg, node))
4083 0834c866 Iustin Pop
    for dev in instance.disks:
4084 0834c866 Iustin Pop
      if dev.iv_name not in self.op.disks:
4085 0834c866 Iustin Pop
        continue
4086 0834c866 Iustin Pop
      info("checking %s on %s" % (dev.iv_name, pri_node))
4087 0834c866 Iustin Pop
      cfg.SetDiskID(dev, pri_node)
4088 0834c866 Iustin Pop
      if not rpc.call_blockdev_find(pri_node, dev):
4089 0834c866 Iustin Pop
        raise errors.OpExecError("Can't find device %s on node %s" %
4090 0834c866 Iustin Pop
                                 (dev.iv_name, pri_node))
4091 0834c866 Iustin Pop
4092 0834c866 Iustin Pop
    # Step: check other node consistency
4093 5bfac263 Iustin Pop
    self.proc.LogStep(2, steps_total, "check peer consistency")
4094 0834c866 Iustin Pop
    for dev in instance.disks:
4095 0834c866 Iustin Pop
      if dev.iv_name not in self.op.disks:
4096 0834c866 Iustin Pop
        continue
4097 0834c866 Iustin Pop
      info("checking %s consistency on %s" % (dev.iv_name, pri_node))
4098 0834c866 Iustin Pop
      if not _CheckDiskConsistency(self.cfg, dev, pri_node, True, ldisk=True):
4099 0834c866 Iustin Pop
        raise errors.OpExecError("Primary node (%s) has degraded storage,"
4100 0834c866 Iustin Pop
                                 " unsafe to replace the secondary" %
4101 0834c866 Iustin Pop
                                 pri_node)
4102 0834c866 Iustin Pop
4103 0834c866 Iustin Pop
    # Step: create new storage
4104 5bfac263 Iustin Pop
    self.proc.LogStep(3, steps_total, "allocate new storage")
4105 a9e0c397 Iustin Pop
    for dev in instance.disks:
4106 a9e0c397 Iustin Pop
      size = dev.size
4107 0834c866 Iustin Pop
      info("adding new local storage on %s for %s" % (new_node, dev.iv_name))
4108 a9e0c397 Iustin Pop
      # since we *always* want to create this LV, we use the
4109 a9e0c397 Iustin Pop
      # _Create...OnPrimary (which forces the creation), even if we
4110 a9e0c397 Iustin Pop
      # are talking about the secondary node
4111 a9e0c397 Iustin Pop
      for new_lv in dev.children:
4112 3f78eef2 Iustin Pop
        if not _CreateBlockDevOnPrimary(cfg, new_node, instance, new_lv,
4113 a9e0c397 Iustin Pop
                                        _GetInstanceInfoText(instance)):
4114 a9e0c397 Iustin Pop
          raise errors.OpExecError("Failed to create new LV named '%s' on"
4115 a9e0c397 Iustin Pop
                                   " node '%s'" %
4116 a9e0c397 Iustin Pop
                                   (new_lv.logical_id[1], new_node))
4117 a9e0c397 Iustin Pop
4118 0834c866 Iustin Pop
      iv_names[dev.iv_name] = (dev, dev.children)
4119 0834c866 Iustin Pop
4120 5bfac263 Iustin Pop
    self.proc.LogStep(4, steps_total, "changing drbd configuration")
4121 0834c866 Iustin Pop
    for dev in instance.disks:
4122 0834c866 Iustin Pop
      size = dev.size
4123 0834c866 Iustin Pop
      info("activating a new drbd on %s for %s" % (new_node, dev.iv_name))
4124 a9e0c397 Iustin Pop
      # create new devices on new_node
4125 a9e0c397 Iustin Pop
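      # build a drbd8 device like the old one, but naming the new node as
      # secondary; the last element of the old logical_id and the local LV
      # children are reused unchanged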
      new_drbd = objects.Disk(dev_type=constants.LD_DRBD8,
4126 a9e0c397 Iustin Pop
                              logical_id=(pri_node, new_node,
4127 a9e0c397 Iustin Pop
                                          dev.logical_id[2]),
4128 a9e0c397 Iustin Pop
                              children=dev.children)
4129 3f78eef2 Iustin Pop
      if not _CreateBlockDevOnSecondary(cfg, new_node, instance,
4130 3f78eef2 Iustin Pop
                                        new_drbd, False,
4131 a9e0c397 Iustin Pop
                                      _GetInstanceInfoText(instance)):
4132 a9e0c397 Iustin Pop
        raise errors.OpExecError("Failed to create new DRBD on"
4133 a9e0c397 Iustin Pop
                                 " node '%s'" % new_node)
4134 a9e0c397 Iustin Pop
4135 0834c866 Iustin Pop
    for dev in instance.disks:
4136 a9e0c397 Iustin Pop
      # we have new devices, shutdown the drbd on the old secondary
4137 0834c866 Iustin Pop
      info("shutting down drbd for %s on old node" % dev.iv_name)
4138 a9e0c397 Iustin Pop
      cfg.SetDiskID(dev, old_node)
4139 a9e0c397 Iustin Pop
      if not rpc.call_blockdev_shutdown(old_node, dev):
4140 0834c866 Iustin Pop
        warning("Failed to shutdown drbd for %s on old node" % dev.iv_name,
4141 79caa9ed Guido Trotter
                hint="Please cleanup this device manually as soon as possible")
4142 a9e0c397 Iustin Pop
4143 642445d9 Iustin Pop
    info("detaching primary drbds from the network (=> standalone)")
4144 642445d9 Iustin Pop
    done = 0
4145 642445d9 Iustin Pop
    for dev in instance.disks:
4146 a9e0c397 Iustin Pop
      cfg.SetDiskID(dev, pri_node)
4147 642445d9 Iustin Pop
      # set the physical (unique in bdev terms) id to None, meaning
4148 642445d9 Iustin Pop
      # detach from network
4149 642445d9 Iustin Pop
      dev.physical_id = (None,) * len(dev.physical_id)
4150 642445d9 Iustin Pop
      # and 'find' the device, which will 'fix' it to match the
4151 642445d9 Iustin Pop
      # standalone state
4152 642445d9 Iustin Pop
      if rpc.call_blockdev_find(pri_node, dev):
4153 642445d9 Iustin Pop
        done += 1
4154 642445d9 Iustin Pop
      else:
4155 642445d9 Iustin Pop
        warning("Failed to detach drbd %s from network, unusual case" %
4156 642445d9 Iustin Pop
                dev.iv_name)
4157 642445d9 Iustin Pop
4158 642445d9 Iustin Pop
    if not done:
4159 642445d9 Iustin Pop
      # no detaches succeeded (very unlikely)
4160 642445d9 Iustin Pop
      raise errors.OpExecError("Can't detach at least one DRBD from old node")
4161 642445d9 Iustin Pop
4162 642445d9 Iustin Pop
    # if we managed to detach at least one, we update all the disks of
4163 642445d9 Iustin Pop
    # the instance to point to the new secondary
4164 642445d9 Iustin Pop
    info("updating instance configuration")
4165 642445d9 Iustin Pop
    for dev in instance.disks:
4166 642445d9 Iustin Pop
      dev.logical_id = (pri_node, new_node) + dev.logical_id[2:]
4167 a9e0c397 Iustin Pop
      cfg.SetDiskID(dev, pri_node)
4168 642445d9 Iustin Pop
    cfg.Update(instance)
4169 a9e0c397 Iustin Pop
4170 642445d9 Iustin Pop
    # and now perform the drbd attach
4171 642445d9 Iustin Pop
    info("attaching primary drbds to new secondary (standalone => connected)")
4172 642445d9 Iustin Pop
    failures = []
4173 642445d9 Iustin Pop
    for dev in instance.disks:
4174 642445d9 Iustin Pop
      info("attaching primary drbd for %s to new secondary node" % dev.iv_name)
4175 642445d9 Iustin Pop
      # since the attach is smart, it's enough to 'find' the device,
4176 642445d9 Iustin Pop
      # it will automatically activate the network, if the physical_id
4177 642445d9 Iustin Pop
      # is correct
4178 642445d9 Iustin Pop
      cfg.SetDiskID(dev, pri_node)
4179 642445d9 Iustin Pop
      if not rpc.call_blockdev_find(pri_node, dev):
4180 642445d9 Iustin Pop
        warning("can't attach drbd %s to new secondary!" % dev.iv_name,
4181 642445d9 Iustin Pop
                "please do a gnt-instance info to see the status of disks")
4182 a9e0c397 Iustin Pop
4183 a9e0c397 Iustin Pop
    # this can fail as the old devices are degraded and _WaitForSync
4184 a9e0c397 Iustin Pop
    # does a combined result over all disks, so we don't check its
4185 a9e0c397 Iustin Pop
    # return value
4186 5bfac263 Iustin Pop
    self.proc.LogStep(5, steps_total, "sync devices")
4187 5bfac263 Iustin Pop
    _WaitForSync(cfg, instance, self.proc, unlock=True)
4188 a9e0c397 Iustin Pop
4189 a9e0c397 Iustin Pop
    # so check manually all the devices
4190 a9e0c397 Iustin Pop
    for name, (dev, old_lvs) in iv_names.iteritems():
4191 a9e0c397 Iustin Pop
      cfg.SetDiskID(dev, pri_node)
4192 a9e0c397 Iustin Pop
      is_degr = rpc.call_blockdev_find(pri_node, dev)[5]
4193 a9e0c397 Iustin Pop
      if is_degr:
4194 a9e0c397 Iustin Pop
        raise errors.OpExecError("DRBD device %s is degraded!" % name)
4195 a9e0c397 Iustin Pop
4196 5bfac263 Iustin Pop
    self.proc.LogStep(6, steps_total, "removing old storage")
4197 a9e0c397 Iustin Pop
    for name, (dev, old_lvs) in iv_names.iteritems():
4198 0834c866 Iustin Pop
      info("remove logical volumes for %s" % name)
4199 a9e0c397 Iustin Pop
      for lv in old_lvs:
4200 a9e0c397 Iustin Pop
        cfg.SetDiskID(lv, old_node)
4201 a9e0c397 Iustin Pop
        if not rpc.call_blockdev_remove(old_node, lv):
4202 0834c866 Iustin Pop
          warning("Can't remove LV on old secondary",
4203 79caa9ed Guido Trotter
                  hint="Cleanup stale volumes by hand")
4204 a9e0c397 Iustin Pop
4205 a9e0c397 Iustin Pop
  def Exec(self, feedback_fn):
4206 a9e0c397 Iustin Pop
    """Execute disk replacement.
4207 a9e0c397 Iustin Pop

4208 a9e0c397 Iustin Pop
    This dispatches the disk replacement to the appropriate handler.
4209 a9e0c397 Iustin Pop

4210 a9e0c397 Iustin Pop
    """
4211 a9e0c397 Iustin Pop
    instance = self.instance
4212 a9e0c397 Iustin Pop
    if instance.disk_template == constants.DT_REMOTE_RAID1:
4213 a9e0c397 Iustin Pop
      fn = self._ExecRR1
4214 a9e0c397 Iustin Pop
    elif instance.disk_template == constants.DT_DRBD8:
4215 a9e0c397 Iustin Pop
      if self.op.remote_node is None:
4216 a9e0c397 Iustin Pop
        fn = self._ExecD8DiskOnly
4217 a9e0c397 Iustin Pop
      else:
4218 a9e0c397 Iustin Pop
        fn = self._ExecD8Secondary
4219 a9e0c397 Iustin Pop
    else:
4220 a9e0c397 Iustin Pop
      raise errors.ProgrammerError("Unhandled disk replacement case")
4221 a9e0c397 Iustin Pop
    return fn(feedback_fn)
4222 a9e0c397 Iustin Pop
4223 a8083063 Iustin Pop
4224 a8083063 Iustin Pop
class LUQueryInstanceData(NoHooksLU):
4225 a8083063 Iustin Pop
  """Query runtime instance data.
4226 a8083063 Iustin Pop

4227 a8083063 Iustin Pop
  """
4228 a8083063 Iustin Pop
  _OP_REQP = ["instances"]
4229 a8083063 Iustin Pop
4230 a8083063 Iustin Pop
  def CheckPrereq(self):
4231 a8083063 Iustin Pop
    """Check prerequisites.
4232 a8083063 Iustin Pop

4233 a8083063 Iustin Pop
    This only checks the optional instance list against the existing names.
4234 a8083063 Iustin Pop

4235 a8083063 Iustin Pop
    """
4236 a8083063 Iustin Pop
    if not isinstance(self.op.instances, list):
4237 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("Invalid argument type 'instances'")
4238 a8083063 Iustin Pop
    if self.op.instances:
4239 a8083063 Iustin Pop
      self.wanted_instances = []
4240 a8083063 Iustin Pop
      names = self.op.instances
4241 a8083063 Iustin Pop
      for name in names:
4242 a8083063 Iustin Pop
        instance = self.cfg.GetInstanceInfo(self.cfg.ExpandInstanceName(name))
4243 a8083063 Iustin Pop
        if instance is None:
4244 3ecf6786 Iustin Pop
          raise errors.OpPrereqError("No such instance name '%s'" % name)
4245 515207af Guido Trotter
        self.wanted_instances.append(instance)
4246 a8083063 Iustin Pop
    else:
4247 a8083063 Iustin Pop
      self.wanted_instances = [self.cfg.GetInstanceInfo(name) for name
4248 a8083063 Iustin Pop
                               in self.cfg.GetInstanceList()]
4249 a8083063 Iustin Pop
    return
4250 a8083063 Iustin Pop
4251 a8083063 Iustin Pop
4252 a8083063 Iustin Pop
  def _ComputeDiskStatus(self, instance, snode, dev):
4253 a8083063 Iustin Pop
    """Compute block device status.
4254 a8083063 Iustin Pop

4255 a8083063 Iustin Pop
    """
4256 a8083063 Iustin Pop
    self.cfg.SetDiskID(dev, instance.primary_node)
4257 a8083063 Iustin Pop
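    # query the live status of the device on the primary node; the
    # secondary status (if any) is gathered below as dev_sstatus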
    dev_pstatus = rpc.call_blockdev_find(instance.primary_node, dev)
4258 a1f445d3 Iustin Pop
    if dev.dev_type in constants.LDS_DRBD:
4259 a8083063 Iustin Pop
      # we change the snode then (otherwise we use the one passed in)
4260 a8083063 Iustin Pop
      if dev.logical_id[0] == instance.primary_node:
4261 a8083063 Iustin Pop
        snode = dev.logical_id[1]
4262 a8083063 Iustin Pop
      else:
4263 a8083063 Iustin Pop
        snode = dev.logical_id[0]
4264 a8083063 Iustin Pop
4265 a8083063 Iustin Pop
    if snode:
4266 a8083063 Iustin Pop
      self.cfg.SetDiskID(dev, snode)
4267 a8083063 Iustin Pop
      dev_sstatus = rpc.call_blockdev_find(snode, dev)
4268 a8083063 Iustin Pop
    else:
4269 a8083063 Iustin Pop
      dev_sstatus = None
4270 a8083063 Iustin Pop
4271 a8083063 Iustin Pop
    if dev.children:
4272 a8083063 Iustin Pop
      dev_children = [self._ComputeDiskStatus(instance, snode, child)
4273 a8083063 Iustin Pop
                      for child in dev.children]
4274 a8083063 Iustin Pop
    else:
4275 a8083063 Iustin Pop
      dev_children = []
4276 a8083063 Iustin Pop
4277 a8083063 Iustin Pop
    data = {
4278 a8083063 Iustin Pop
      "iv_name": dev.iv_name,
4279 a8083063 Iustin Pop
      "dev_type": dev.dev_type,
4280 a8083063 Iustin Pop
      "logical_id": dev.logical_id,
4281 a8083063 Iustin Pop
      "physical_id": dev.physical_id,
4282 a8083063 Iustin Pop
      "pstatus": dev_pstatus,
4283 a8083063 Iustin Pop
      "sstatus": dev_sstatus,
4284 a8083063 Iustin Pop
      "children": dev_children,
4285 a8083063 Iustin Pop
      }
4286 a8083063 Iustin Pop
4287 a8083063 Iustin Pop
    return data
4288 a8083063 Iustin Pop
4289 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
4290 a8083063 Iustin Pop
    """Gather and return data"""
4291 a8083063 Iustin Pop
    result = {}
4292 a8083063 Iustin Pop
    for instance in self.wanted_instances:
4293 a8083063 Iustin Pop
      remote_info = rpc.call_instance_info(instance.primary_node,
4294 a8083063 Iustin Pop
                                                instance.name)
4295 a8083063 Iustin Pop
      if remote_info and "state" in remote_info:
4296 a8083063 Iustin Pop
        remote_state = "up"
4297 a8083063 Iustin Pop
      else:
4298 a8083063 Iustin Pop
        remote_state = "down"
4299 a8083063 Iustin Pop
      if instance.status == "down":
4300 a8083063 Iustin Pop
        config_state = "down"
4301 a8083063 Iustin Pop
      else:
4302 a8083063 Iustin Pop
        config_state = "up"
4303 a8083063 Iustin Pop
4304 a8083063 Iustin Pop
      disks = [self._ComputeDiskStatus(instance, None, device)
4305 a8083063 Iustin Pop
               for device in instance.disks]
4306 a8083063 Iustin Pop
4307 a8083063 Iustin Pop
      idict = {
4308 a8083063 Iustin Pop
        "name": instance.name,
4309 a8083063 Iustin Pop
        "config_state": config_state,
4310 a8083063 Iustin Pop
        "run_state": remote_state,
4311 a8083063 Iustin Pop
        "pnode": instance.primary_node,
4312 a8083063 Iustin Pop
        "snodes": instance.secondary_nodes,
4313 a8083063 Iustin Pop
        "os": instance.os,
4314 a8083063 Iustin Pop
        "memory": instance.memory,
4315 a8083063 Iustin Pop
        "nics": [(nic.mac, nic.ip, nic.bridge) for nic in instance.nics],
4316 a8083063 Iustin Pop
        "disks": disks,
4317 58acb49d Alexander Schreiber
        "network_port": instance.network_port,
4318 f55ff7ec Iustin Pop
        "vcpus": instance.vcpus,
4319 71aa8f73 Iustin Pop
        "kernel_path": instance.kernel_path,
4320 71aa8f73 Iustin Pop
        "initrd_path": instance.initrd_path,
4321 8ae6bb54 Iustin Pop
        "hvm_boot_order": instance.hvm_boot_order,
4322 a8083063 Iustin Pop
        }
4323 a8083063 Iustin Pop
4324 a8083063 Iustin Pop
      result[instance.name] = idict
4325 a8083063 Iustin Pop
4326 a8083063 Iustin Pop
    return result
4327 a8083063 Iustin Pop
4328 a8083063 Iustin Pop
4329 7767bbf5 Manuel Franceschini
class LUSetInstanceParams(LogicalUnit):
4330 a8083063 Iustin Pop
  """Modifies an instances's parameters.
4331 a8083063 Iustin Pop

4332 a8083063 Iustin Pop
  """
4333 a8083063 Iustin Pop
  HPATH = "instance-modify"
4334 a8083063 Iustin Pop
  HTYPE = constants.HTYPE_INSTANCE
4335 a8083063 Iustin Pop
  _OP_REQP = ["instance_name"]
4336 a8083063 Iustin Pop
4337 a8083063 Iustin Pop
  def BuildHooksEnv(self):
4338 a8083063 Iustin Pop
    """Build hooks env.
4339 a8083063 Iustin Pop

4340 a8083063 Iustin Pop
    This runs on the master, primary and secondaries.
4341 a8083063 Iustin Pop

4342 a8083063 Iustin Pop
    """
4343 396e1b78 Michael Hanselmann
    args = dict()
4344 a8083063 Iustin Pop
    if self.mem:
4345 396e1b78 Michael Hanselmann
      args['memory'] = self.mem
4346 a8083063 Iustin Pop
    if self.vcpus:
4347 396e1b78 Michael Hanselmann
      args['vcpus'] = self.vcpus
4348 ef756965 Iustin Pop
    if self.do_ip or self.do_bridge or self.mac:
4349 396e1b78 Michael Hanselmann
      if self.do_ip:
4350 396e1b78 Michael Hanselmann
        ip = self.ip
4351 396e1b78 Michael Hanselmann
      else:
4352 396e1b78 Michael Hanselmann
        ip = self.instance.nics[0].ip
4353 396e1b78 Michael Hanselmann
      if self.bridge:
4354 396e1b78 Michael Hanselmann
        bridge = self.bridge
4355 396e1b78 Michael Hanselmann
      else:
4356 396e1b78 Michael Hanselmann
        bridge = self.instance.nics[0].bridge
4357 ef756965 Iustin Pop
      if self.mac:
4358 ef756965 Iustin Pop
        mac = self.mac
4359 ef756965 Iustin Pop
      else:
4360 ef756965 Iustin Pop
        mac = self.instance.nics[0].mac
4361 ef756965 Iustin Pop
      args['nics'] = [(ip, bridge, mac)]
4362 396e1b78 Michael Hanselmann
    env = _BuildInstanceHookEnvByObject(self.instance, override=args)
4363 880478f8 Iustin Pop
    nl = [self.sstore.GetMasterNode(),
4364 a8083063 Iustin Pop
          self.instance.primary_node] + list(self.instance.secondary_nodes)
4365 a8083063 Iustin Pop
    return env, nl, nl
4366 a8083063 Iustin Pop
4367 a8083063 Iustin Pop
  def CheckPrereq(self):
4368 a8083063 Iustin Pop
    """Check prerequisites.
4369 a8083063 Iustin Pop

4370 a8083063 Iustin Pop
    This only checks the instance list against the existing names.
4371 a8083063 Iustin Pop

4372 a8083063 Iustin Pop
    """
4373 a8083063 Iustin Pop
    self.mem = getattr(self.op, "mem", None)
4374 a8083063 Iustin Pop
    self.vcpus = getattr(self.op, "vcpus", None)
4375 a8083063 Iustin Pop
    self.ip = getattr(self.op, "ip", None)
4376 1862d460 Alexander Schreiber
    self.mac = getattr(self.op, "mac", None)
4377 a8083063 Iustin Pop
    self.bridge = getattr(self.op, "bridge", None)
4378 973d7867 Iustin Pop
    self.kernel_path = getattr(self.op, "kernel_path", None)
4379 973d7867 Iustin Pop
    self.initrd_path = getattr(self.op, "initrd_path", None)
4380 25c5878d Alexander Schreiber
    self.hvm_boot_order = getattr(self.op, "hvm_boot_order", None)
4381 7767bbf5 Manuel Franceschini
    all_params = [self.mem, self.vcpus, self.ip, self.bridge, self.mac,
4382 7767bbf5 Manuel Franceschini
                  self.kernel_path, self.initrd_path, self.hvm_boot_order]
4383 7767bbf5 Manuel Franceschini
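    # at least one of the modifiable parameters must have been given,
    # otherwise there is nothing to do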
    if all_params.count(None) == len(all_params):
4384 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("No changes submitted")
4385 a8083063 Iustin Pop
    if self.mem is not None:
4386 a8083063 Iustin Pop
      try:
4387 a8083063 Iustin Pop
        self.mem = int(self.mem)
4388 a8083063 Iustin Pop
      except ValueError, err:
4389 3ecf6786 Iustin Pop
        raise errors.OpPrereqError("Invalid memory size: %s" % str(err))
4390 a8083063 Iustin Pop
    if self.vcpus is not None:
4391 a8083063 Iustin Pop
      try:
4392 a8083063 Iustin Pop
        self.vcpus = int(self.vcpus)
4393 a8083063 Iustin Pop
      except ValueError, err:
4394 3ecf6786 Iustin Pop
        raise errors.OpPrereqError("Invalid vcpus number: %s" % str(err))
4395 a8083063 Iustin Pop
    if self.ip is not None:
4396 a8083063 Iustin Pop
      self.do_ip = True
4397 a8083063 Iustin Pop
      if self.ip.lower() == "none":
4398 a8083063 Iustin Pop
        self.ip = None
4399 a8083063 Iustin Pop
      else:
4400 a8083063 Iustin Pop
        if not utils.IsValidIP(self.ip):
4401 3ecf6786 Iustin Pop
          raise errors.OpPrereqError("Invalid IP address '%s'." % self.ip)
4402 a8083063 Iustin Pop
    else:
4403 a8083063 Iustin Pop
      self.do_ip = False
4404 ecb215b5 Michael Hanselmann
    self.do_bridge = (self.bridge is not None)
4405 1862d460 Alexander Schreiber
    if self.mac is not None:
4406 1862d460 Alexander Schreiber
      if self.cfg.IsMacInUse(self.mac):
4407 1862d460 Alexander Schreiber
        raise errors.OpPrereqError("MAC address %s already in use in cluster" %
4408 1862d460 Alexander Schreiber
                                   self.mac)
4409 1862d460 Alexander Schreiber
      if not utils.IsValidMac(self.mac):
4410 1862d460 Alexander Schreiber
        raise errors.OpPrereqError("Invalid MAC address %s" % self.mac)
4411 a8083063 Iustin Pop
4412 973d7867 Iustin Pop
    if self.kernel_path is not None:
4413 973d7867 Iustin Pop
      self.do_kernel_path = True
4414 973d7867 Iustin Pop
      if self.kernel_path == constants.VALUE_NONE:
4415 973d7867 Iustin Pop
        raise errors.OpPrereqError("Can't set instance to no kernel")
4416 973d7867 Iustin Pop
4417 973d7867 Iustin Pop
      if self.kernel_path != constants.VALUE_DEFAULT:
4418 973d7867 Iustin Pop
        if not os.path.isabs(self.kernel_path):
4419 ba4b62cf Iustin Pop
          raise errors.OpPrereqError("The kernel path must be an absolute"
4420 973d7867 Iustin Pop
                                    " filename")
4421 8cafeb26 Iustin Pop
    else:
4422 8cafeb26 Iustin Pop
      self.do_kernel_path = False
4423 973d7867 Iustin Pop
4424 973d7867 Iustin Pop
    if self.initrd_path is not None:
4425 973d7867 Iustin Pop
      self.do_initrd_path = True
4426 973d7867 Iustin Pop
      if self.initrd_path not in (constants.VALUE_NONE,
4427 973d7867 Iustin Pop
                                  constants.VALUE_DEFAULT):
4428 2bc22872 Iustin Pop
        if not os.path.isabs(self.initrd_path):
4429 ba4b62cf Iustin Pop
          raise errors.OpPrereqError("The initrd path must be an absolute"
4430 973d7867 Iustin Pop
                                    " filename")
4431 8cafeb26 Iustin Pop
    else:
4432 8cafeb26 Iustin Pop
      self.do_initrd_path = False
4433 973d7867 Iustin Pop
4434 25c5878d Alexander Schreiber
    # boot order verification
4435 25c5878d Alexander Schreiber
    if self.hvm_boot_order is not None:
4436 25c5878d Alexander Schreiber
      if self.hvm_boot_order != constants.VALUE_DEFAULT:
4437 25c5878d Alexander Schreiber
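        # only the boot device letters a, c, d and n are accepted (the
        # usual Xen HVM devices: floppy, disk, cdrom and network)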
        if len(self.hvm_boot_order.strip("acdn")) != 0:
4438 25c5878d Alexander Schreiber
          raise errors.OpPrereqError("invalid boot order specified,"
4439 25c5878d Alexander Schreiber
                                     " must be one or more of [acdn]"
4440 25c5878d Alexander Schreiber
                                     " or 'default'")
4441 25c5878d Alexander Schreiber
4442 a8083063 Iustin Pop
    instance = self.cfg.GetInstanceInfo(
4443 a8083063 Iustin Pop
      self.cfg.ExpandInstanceName(self.op.instance_name))
4444 a8083063 Iustin Pop
    if instance is None:
4445 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("No such instance name '%s'" %
4446 3ecf6786 Iustin Pop
                                 self.op.instance_name)
4447 a8083063 Iustin Pop
    self.op.instance_name = instance.name
4448 a8083063 Iustin Pop
    self.instance = instance
4449 a8083063 Iustin Pop
    return
4450 a8083063 Iustin Pop
4451 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
4452 a8083063 Iustin Pop
    """Modifies an instance.
4453 a8083063 Iustin Pop

4454 a8083063 Iustin Pop
    All parameters take effect only at the next restart of the instance.
4455 a8083063 Iustin Pop
    """
4456 a8083063 Iustin Pop
    result = []
4457 a8083063 Iustin Pop
    instance = self.instance
4458 a8083063 Iustin Pop
    if self.mem:
4459 a8083063 Iustin Pop
      instance.memory = self.mem
4460 a8083063 Iustin Pop
      result.append(("mem", self.mem))
4461 a8083063 Iustin Pop
    if self.vcpus:
4462 a8083063 Iustin Pop
      instance.vcpus = self.vcpus
4463 a8083063 Iustin Pop
      result.append(("vcpus",  self.vcpus))
4464 a8083063 Iustin Pop
    if self.do_ip:
4465 a8083063 Iustin Pop
      instance.nics[0].ip = self.ip
4466 a8083063 Iustin Pop
      result.append(("ip", self.ip))
4467 a8083063 Iustin Pop
    if self.bridge:
4468 a8083063 Iustin Pop
      instance.nics[0].bridge = self.bridge
4469 a8083063 Iustin Pop
      result.append(("bridge", self.bridge))
4470 1862d460 Alexander Schreiber
    if self.mac:
4471 1862d460 Alexander Schreiber
      instance.nics[0].mac = self.mac
4472 1862d460 Alexander Schreiber
      result.append(("mac", self.mac))
4473 973d7867 Iustin Pop
    if self.do_kernel_path:
4474 973d7867 Iustin Pop
      instance.kernel_path = self.kernel_path
4475 973d7867 Iustin Pop
      result.append(("kernel_path", self.kernel_path))
4476 973d7867 Iustin Pop
    if self.do_initrd_path:
4477 973d7867 Iustin Pop
      instance.initrd_path = self.initrd_path
4478 973d7867 Iustin Pop
      result.append(("initrd_path", self.initrd_path))
4479 25c5878d Alexander Schreiber
    if self.hvm_boot_order:
4480 25c5878d Alexander Schreiber
      if self.hvm_boot_order == constants.VALUE_DEFAULT:
4481 25c5878d Alexander Schreiber
        instance.hvm_boot_order = None
4482 25c5878d Alexander Schreiber
      else:
4483 25c5878d Alexander Schreiber
        instance.hvm_boot_order = self.hvm_boot_order
4484 25c5878d Alexander Schreiber
      result.append(("hvm_boot_order", self.hvm_boot_order))
4485 a8083063 Iustin Pop
4486 a8083063 Iustin Pop
    self.cfg.AddInstance(instance)
4487 a8083063 Iustin Pop
4488 a8083063 Iustin Pop
    return result
4489 a8083063 Iustin Pop
4490 a8083063 Iustin Pop
4491 a8083063 Iustin Pop
class LUQueryExports(NoHooksLU):
4492 a8083063 Iustin Pop
  """Query the exports list
4493 a8083063 Iustin Pop

4494 a8083063 Iustin Pop
  """
4495 a8083063 Iustin Pop
  _OP_REQP = []
4496 a8083063 Iustin Pop
4497 a8083063 Iustin Pop
  def CheckPrereq(self):
4498 a8083063 Iustin Pop
    """Check that the nodelist contains only existing nodes.
4499 a8083063 Iustin Pop

4500 a8083063 Iustin Pop
    """
4501 dcb93971 Michael Hanselmann
    self.nodes = _GetWantedNodes(self, getattr(self.op, "nodes", None))
4502 a8083063 Iustin Pop
4503 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
4504 a8083063 Iustin Pop
    """Compute the list of all the exported system images.
4505 a8083063 Iustin Pop

4506 a8083063 Iustin Pop
    Returns:
4507 a8083063 Iustin Pop
      a dictionary with the structure node->(export-list)
4508 a8083063 Iustin Pop
      where export-list is a list of the instances exported on
4509 a8083063 Iustin Pop
      that node.
4510 a8083063 Iustin Pop

4511 a8083063 Iustin Pop
    """
4512 a7ba5e53 Iustin Pop
    return rpc.call_export_list(self.nodes)
4513 a8083063 Iustin Pop
4514 a8083063 Iustin Pop
4515 a8083063 Iustin Pop
class LUExportInstance(LogicalUnit):
4516 a8083063 Iustin Pop
  """Export an instance to an image in the cluster.
4517 a8083063 Iustin Pop

4518 a8083063 Iustin Pop
  """
4519 a8083063 Iustin Pop
  HPATH = "instance-export"
4520 a8083063 Iustin Pop
  HTYPE = constants.HTYPE_INSTANCE
4521 a8083063 Iustin Pop
  _OP_REQP = ["instance_name", "target_node", "shutdown"]
4522 a8083063 Iustin Pop
4523 a8083063 Iustin Pop
  def BuildHooksEnv(self):
4524 a8083063 Iustin Pop
    """Build hooks env.
4525 a8083063 Iustin Pop

4526 a8083063 Iustin Pop
    This will run on the master, primary node and target node.
4527 a8083063 Iustin Pop

4528 a8083063 Iustin Pop
    """
4529 a8083063 Iustin Pop
    env = {
4530 a8083063 Iustin Pop
      "EXPORT_NODE": self.op.target_node,
4531 a8083063 Iustin Pop
      "EXPORT_DO_SHUTDOWN": self.op.shutdown,
4532 a8083063 Iustin Pop
      }
4533 396e1b78 Michael Hanselmann
    env.update(_BuildInstanceHookEnvByObject(self.instance))
4534 880478f8 Iustin Pop
    nl = [self.sstore.GetMasterNode(), self.instance.primary_node,
4535 a8083063 Iustin Pop
          self.op.target_node]
4536 a8083063 Iustin Pop
    return env, nl, nl
4537 a8083063 Iustin Pop
4538 a8083063 Iustin Pop
  def CheckPrereq(self):
4539 a8083063 Iustin Pop
    """Check prerequisites.
4540 a8083063 Iustin Pop

4541 9ac99fda Guido Trotter
    This checks that the instance and node names are valid.
4542 a8083063 Iustin Pop

4543 a8083063 Iustin Pop
    """
4544 a8083063 Iustin Pop
    instance_name = self.cfg.ExpandInstanceName(self.op.instance_name)
4545 a8083063 Iustin Pop
    self.instance = self.cfg.GetInstanceInfo(instance_name)
4546 a8083063 Iustin Pop
    if self.instance is None:
4547 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("Instance '%s' not found" %
4548 3ecf6786 Iustin Pop
                                 self.op.instance_name)
4549 a8083063 Iustin Pop
4550 a8083063 Iustin Pop
    # node verification
4551 a8083063 Iustin Pop
    dst_node_short = self.cfg.ExpandNodeName(self.op.target_node)
4552 a8083063 Iustin Pop
    self.dst_node = self.cfg.GetNodeInfo(dst_node_short)
4553 a8083063 Iustin Pop
4554 a8083063 Iustin Pop
    if self.dst_node is None:
4555 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("Destination node '%s' is unknown." %
4556 3ecf6786 Iustin Pop
                                 self.op.target_node)
4557 a8083063 Iustin Pop
    self.op.target_node = self.dst_node.name
4558 a8083063 Iustin Pop
4559 b6023d6c Manuel Franceschini
    # instance disk type verification
4560 b6023d6c Manuel Franceschini
    for disk in self.instance.disks:
4561 b6023d6c Manuel Franceschini
      if disk.dev_type == constants.LD_FILE:
4562 b6023d6c Manuel Franceschini
        raise errors.OpPrereqError("Export not supported for instances with"
4563 b6023d6c Manuel Franceschini
                                   " file-based disks")
4564 b6023d6c Manuel Franceschini
4565 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
4566 a8083063 Iustin Pop
    """Export an instance to an image in the cluster.
4567 a8083063 Iustin Pop

4568 a8083063 Iustin Pop
    """
4569 a8083063 Iustin Pop
    instance = self.instance
4570 a8083063 Iustin Pop
    dst_node = self.dst_node
4571 a8083063 Iustin Pop
    src_node = instance.primary_node
4572 a8083063 Iustin Pop
    if self.op.shutdown:
4573 fb300fb7 Guido Trotter
      # shut down the instance, but not the disks
4574 fb300fb7 Guido Trotter
      if not rpc.call_instance_shutdown(src_node, instance):
4575 fb300fb7 Guido Trotter
         raise errors.OpExecError("Could not shutdown instance %s on node %s" %
4576 b4de68a9 Iustin Pop
                                  (instance.name, src_node))
4577 a8083063 Iustin Pop
4578 a8083063 Iustin Pop
    vgname = self.cfg.GetVGName()
4579 a8083063 Iustin Pop
4580 a8083063 Iustin Pop
    snap_disks = []
4581 a8083063 Iustin Pop
4582 a8083063 Iustin Pop
    try:
4583 a8083063 Iustin Pop
      for disk in instance.disks:
4584 a8083063 Iustin Pop
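        # only the first disk (sda) is snapshotted and exported; any other
        # disks of the instance are skipped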
        if disk.iv_name == "sda":
4585 a8083063 Iustin Pop
          # new_dev_name will be a snapshot of an lvm leaf of the one we passed
4586 a8083063 Iustin Pop
          new_dev_name = rpc.call_blockdev_snapshot(src_node, disk)
4587 a8083063 Iustin Pop
4588 a8083063 Iustin Pop
          if not new_dev_name:
4589 a8083063 Iustin Pop
            logger.Error("could not snapshot block device %s on node %s" %
4590 a8083063 Iustin Pop
                         (disk.logical_id[1], src_node))
4591 a8083063 Iustin Pop
          else:
4592 fe96220b Iustin Pop
            new_dev = objects.Disk(dev_type=constants.LD_LV, size=disk.size,
4593 a8083063 Iustin Pop
                                      logical_id=(vgname, new_dev_name),
4594 a8083063 Iustin Pop
                                      physical_id=(vgname, new_dev_name),
4595 a8083063 Iustin Pop
                                      iv_name=disk.iv_name)
4596 a8083063 Iustin Pop
            snap_disks.append(new_dev)
4597 a8083063 Iustin Pop
4598 a8083063 Iustin Pop
    finally:
4599 fb300fb7 Guido Trotter
      if self.op.shutdown and instance.status == "up":
4600 fb300fb7 Guido Trotter
        if not rpc.call_instance_start(src_node, instance, None):
4601 fb300fb7 Guido Trotter
          _ShutdownInstanceDisks(instance, self.cfg)
4602 fb300fb7 Guido Trotter
          raise errors.OpExecError("Could not start instance")
4603 a8083063 Iustin Pop
4604 a8083063 Iustin Pop
    # TODO: check for size
4605 a8083063 Iustin Pop
4606 a8083063 Iustin Pop
    for dev in snap_disks:
4607 16687b98 Manuel Franceschini
      if not rpc.call_snapshot_export(src_node, dev, dst_node.name, instance):
4608 16687b98 Manuel Franceschini
        logger.Error("could not export block device %s from node %s to node %s"
4609 16687b98 Manuel Franceschini
                     % (dev.logical_id[1], src_node, dst_node.name))
4610 a8083063 Iustin Pop
      if not rpc.call_blockdev_remove(src_node, dev):
4611 16687b98 Manuel Franceschini
        logger.Error("could not remove snapshot block device %s from node %s" %
4612 16687b98 Manuel Franceschini
                     (dev.logical_id[1], src_node))
4613 a8083063 Iustin Pop
4614 a8083063 Iustin Pop
    if not rpc.call_finalize_export(dst_node.name, instance, snap_disks):
4615 a8083063 Iustin Pop
      logger.Error("could not finalize export for instance %s on node %s" %
4616 a8083063 Iustin Pop
                   (instance.name, dst_node.name))
4617 a8083063 Iustin Pop
4618 a8083063 Iustin Pop
    nodelist = self.cfg.GetNodeList()
4619 a8083063 Iustin Pop
    nodelist.remove(dst_node.name)
4620 a8083063 Iustin Pop
4621 a8083063 Iustin Pop
    # on one-node clusters nodelist will be empty after the removal
4622 a8083063 Iustin Pop
    # if we proceed the backup would be removed because OpQueryExports
4623 a8083063 Iustin Pop
    # substitutes an empty list with the full cluster node list.
4624 a8083063 Iustin Pop
    if nodelist:
4625 a8083063 Iustin Pop
      op = opcodes.OpQueryExports(nodes=nodelist)
4626 5bfac263 Iustin Pop
      exportlist = self.proc.ChainOpCode(op)
4627 a8083063 Iustin Pop
      for node in exportlist:
4628 a8083063 Iustin Pop
        if instance.name in exportlist[node]:
4629 a8083063 Iustin Pop
          if not rpc.call_export_remove(node, instance.name):
4630 a8083063 Iustin Pop
            logger.Error("could not remove older export for instance %s"
4631 a8083063 Iustin Pop
                         " on node %s" % (instance.name, node))
4632 5c947f38 Iustin Pop
4633 5c947f38 Iustin Pop
4634 9ac99fda Guido Trotter
class LURemoveExport(NoHooksLU):
4635 9ac99fda Guido Trotter
  """Remove exports related to the named instance.
4636 9ac99fda Guido Trotter

4637 9ac99fda Guido Trotter
  """
4638 9ac99fda Guido Trotter
  _OP_REQP = ["instance_name"]
4639 9ac99fda Guido Trotter
4640 9ac99fda Guido Trotter
  def CheckPrereq(self):
4641 9ac99fda Guido Trotter
    """Check prerequisites.
4642 9ac99fda Guido Trotter
    """
4643 9ac99fda Guido Trotter
    pass
4644 9ac99fda Guido Trotter
4645 9ac99fda Guido Trotter
  def Exec(self, feedback_fn):
4646 9ac99fda Guido Trotter
    """Remove any export.
4647 9ac99fda Guido Trotter

4648 9ac99fda Guido Trotter
    """
4649 9ac99fda Guido Trotter
    instance_name = self.cfg.ExpandInstanceName(self.op.instance_name)
4650 9ac99fda Guido Trotter
    # If the instance was not found we'll try with the name that was passed in.
4651 9ac99fda Guido Trotter
    # This will only work if it was an FQDN, though.
4652 9ac99fda Guido Trotter
    fqdn_warn = False
4653 9ac99fda Guido Trotter
    if not instance_name:
4654 9ac99fda Guido Trotter
      fqdn_warn = True
4655 9ac99fda Guido Trotter
      instance_name = self.op.instance_name
4656 9ac99fda Guido Trotter
4657 9ac99fda Guido Trotter
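    # an empty node list makes OpQueryExports query all nodes in the
    # cluster (see the note in LUExportInstance.Exec above)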
    op = opcodes.OpQueryExports(nodes=[])
4658 9ac99fda Guido Trotter
    exportlist = self.proc.ChainOpCode(op)
4659 9ac99fda Guido Trotter
    found = False
4660 9ac99fda Guido Trotter
    for node in exportlist:
4661 9ac99fda Guido Trotter
      if instance_name in exportlist[node]:
4662 9ac99fda Guido Trotter
        found = True
4663 9ac99fda Guido Trotter
        if not rpc.call_export_remove(node, instance_name):
4664 9ac99fda Guido Trotter
          logger.Error("could not remove export for instance %s"
4665 9ac99fda Guido Trotter
                       " on node %s" % (instance_name, node))
4666 9ac99fda Guido Trotter
4667 9ac99fda Guido Trotter
    if fqdn_warn and not found:
4668 9ac99fda Guido Trotter
      feedback_fn("Export not found. If trying to remove an export belonging"
4669 9ac99fda Guido Trotter
                  " to a deleted instance please use its Fully Qualified"
4670 9ac99fda Guido Trotter
                  " Domain Name.")
4671 9ac99fda Guido Trotter
4672 9ac99fda Guido Trotter
4673 5c947f38 Iustin Pop
class TagsLU(NoHooksLU):
4674 5c947f38 Iustin Pop
  """Generic tags LU.
4675 5c947f38 Iustin Pop

4676 5c947f38 Iustin Pop
  This is an abstract class which is the parent of all the other tags LUs.
4677 5c947f38 Iustin Pop

4678 5c947f38 Iustin Pop
  """
4679 5c947f38 Iustin Pop
  def CheckPrereq(self):
4680 5c947f38 Iustin Pop
    """Check prerequisites.
4681 5c947f38 Iustin Pop

4682 5c947f38 Iustin Pop
    """
4683 5c947f38 Iustin Pop
    if self.op.kind == constants.TAG_CLUSTER:
4684 5c947f38 Iustin Pop
      self.target = self.cfg.GetClusterInfo()
4685 5c947f38 Iustin Pop
    elif self.op.kind == constants.TAG_NODE:
4686 5c947f38 Iustin Pop
      name = self.cfg.ExpandNodeName(self.op.name)
4687 5c947f38 Iustin Pop
      if name is None:
4688 3ecf6786 Iustin Pop
        raise errors.OpPrereqError("Invalid node name (%s)" %
4689 3ecf6786 Iustin Pop
                                   (self.op.name,))
4690 5c947f38 Iustin Pop
      self.op.name = name
4691 5c947f38 Iustin Pop
      self.target = self.cfg.GetNodeInfo(name)
4692 5c947f38 Iustin Pop
    elif self.op.kind == constants.TAG_INSTANCE:
4693 8f684e16 Iustin Pop
      name = self.cfg.ExpandInstanceName(self.op.name)
4694 5c947f38 Iustin Pop
      if name is None:
4695 3ecf6786 Iustin Pop
        raise errors.OpPrereqError("Invalid instance name (%s)" %
4696 3ecf6786 Iustin Pop
                                   (self.op.name,))
4697 5c947f38 Iustin Pop
      self.op.name = name
4698 5c947f38 Iustin Pop
      self.target = self.cfg.GetInstanceInfo(name)
4699 5c947f38 Iustin Pop
    else:
4700 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("Wrong tag type requested (%s)" %
4701 3ecf6786 Iustin Pop
                                 str(self.op.kind))
4702 5c947f38 Iustin Pop
4703 5c947f38 Iustin Pop
4704 5c947f38 Iustin Pop
class LUGetTags(TagsLU):
4705 5c947f38 Iustin Pop
  """Returns the tags of a given object.
4706 5c947f38 Iustin Pop

4707 5c947f38 Iustin Pop
  """
4708 5c947f38 Iustin Pop
  _OP_REQP = ["kind", "name"]
4709 5c947f38 Iustin Pop
4710 5c947f38 Iustin Pop
  def Exec(self, feedback_fn):
4711 5c947f38 Iustin Pop
    """Returns the tag list.
4712 5c947f38 Iustin Pop

4713 5c947f38 Iustin Pop
    """
4714 5c947f38 Iustin Pop
    return self.target.GetTags()
4715 5c947f38 Iustin Pop
4716 5c947f38 Iustin Pop
4717 73415719 Iustin Pop
class LUSearchTags(NoHooksLU):
4718 73415719 Iustin Pop
  """Searches the tags for a given pattern.
4719 73415719 Iustin Pop

4720 73415719 Iustin Pop
  """
4721 73415719 Iustin Pop
  _OP_REQP = ["pattern"]
4722 73415719 Iustin Pop
4723 73415719 Iustin Pop
  def CheckPrereq(self):
4724 73415719 Iustin Pop
    """Check prerequisites.
4725 73415719 Iustin Pop

4726 73415719 Iustin Pop
    This checks the pattern passed for validity by compiling it.
4727 73415719 Iustin Pop

4728 73415719 Iustin Pop
    """
4729 73415719 Iustin Pop
    try:
4730 73415719 Iustin Pop
      self.re = re.compile(self.op.pattern)
4731 73415719 Iustin Pop
    except re.error, err:
4732 73415719 Iustin Pop
      raise errors.OpPrereqError("Invalid search pattern '%s': %s" %
4733 73415719 Iustin Pop
                                 (self.op.pattern, err))
4734 73415719 Iustin Pop
4735 73415719 Iustin Pop
  def Exec(self, feedback_fn):
4736 73415719 Iustin Pop
    """Returns the tag list.
4737 73415719 Iustin Pop

4738 73415719 Iustin Pop
    """
4739 73415719 Iustin Pop
    cfg = self.cfg
4740 73415719 Iustin Pop
    tgts = [("/cluster", cfg.GetClusterInfo())]
4741 73415719 Iustin Pop
    ilist = [cfg.GetInstanceInfo(name) for name in cfg.GetInstanceList()]
4742 73415719 Iustin Pop
    tgts.extend([("/instances/%s" % i.name, i) for i in ilist])
4743 73415719 Iustin Pop
    nlist = [cfg.GetNodeInfo(name) for name in cfg.GetNodeList()]
4744 73415719 Iustin Pop
    tgts.extend([("/nodes/%s" % n.name, n) for n in nlist])
4745 73415719 Iustin Pop
    results = []
4746 73415719 Iustin Pop
    for path, target in tgts:
4747 73415719 Iustin Pop
      for tag in target.GetTags():
4748 73415719 Iustin Pop
        if self.re.search(tag):
4749 73415719 Iustin Pop
          results.append((path, tag))
4750 73415719 Iustin Pop
    return results
4751 73415719 Iustin Pop
4752 73415719 Iustin Pop
4753 f27302fa Iustin Pop
class LUAddTags(TagsLU):
4754 5c947f38 Iustin Pop
  """Sets a tag on a given object.
4755 5c947f38 Iustin Pop

4756 5c947f38 Iustin Pop
  """
4757 f27302fa Iustin Pop
  _OP_REQP = ["kind", "name", "tags"]
4758 5c947f38 Iustin Pop
4759 5c947f38 Iustin Pop
  def CheckPrereq(self):
4760 5c947f38 Iustin Pop
    """Check prerequisites.
4761 5c947f38 Iustin Pop

4762 5c947f38 Iustin Pop
    This checks the type and length of the tag name and value.
4763 5c947f38 Iustin Pop

4764 5c947f38 Iustin Pop
    """
4765 5c947f38 Iustin Pop
    TagsLU.CheckPrereq(self)
4766 f27302fa Iustin Pop
    for tag in self.op.tags:
4767 f27302fa Iustin Pop
      objects.TaggableObject.ValidateTag(tag)
4768 5c947f38 Iustin Pop
4769 5c947f38 Iustin Pop
  def Exec(self, feedback_fn):
4770 5c947f38 Iustin Pop
    """Sets the tag.
4771 5c947f38 Iustin Pop

4772 5c947f38 Iustin Pop
    """
4773 5c947f38 Iustin Pop
    try:
4774 f27302fa Iustin Pop
      for tag in self.op.tags:
4775 f27302fa Iustin Pop
        self.target.AddTag(tag)
4776 5c947f38 Iustin Pop
    except errors.TagError, err:
4777 3ecf6786 Iustin Pop
      raise errors.OpExecError("Error while setting tag: %s" % str(err))
4778 5c947f38 Iustin Pop
    try:
4779 5c947f38 Iustin Pop
      self.cfg.Update(self.target)
4780 5c947f38 Iustin Pop
    except errors.ConfigurationError:
4781 3ecf6786 Iustin Pop
      raise errors.OpRetryError("There has been a modification to the"
4782 3ecf6786 Iustin Pop
                                " config file and the operation has been"
4783 3ecf6786 Iustin Pop
                                " aborted. Please retry.")
4784 5c947f38 Iustin Pop
4785 5c947f38 Iustin Pop
4786 f27302fa Iustin Pop
class LUDelTags(TagsLU):
4787 f27302fa Iustin Pop
  """Delete a list of tags from a given object.
4788 5c947f38 Iustin Pop

4789 5c947f38 Iustin Pop
  """
4790 f27302fa Iustin Pop
  _OP_REQP = ["kind", "name", "tags"]
4791 5c947f38 Iustin Pop
4792 5c947f38 Iustin Pop
  def CheckPrereq(self):
4793 5c947f38 Iustin Pop
    """Check prerequisites.
4794 5c947f38 Iustin Pop

4795 5c947f38 Iustin Pop
    This checks that we have the given tag.
4796 5c947f38 Iustin Pop

4797 5c947f38 Iustin Pop
    """
4798 5c947f38 Iustin Pop
    TagsLU.CheckPrereq(self)
4799 f27302fa Iustin Pop
    for tag in self.op.tags:
4800 f27302fa Iustin Pop
      objects.TaggableObject.ValidateTag(tag)
4801 f27302fa Iustin Pop
    del_tags = frozenset(self.op.tags)
4802 f27302fa Iustin Pop
    cur_tags = self.target.GetTags()
4803 f27302fa Iustin Pop
    if not del_tags <= cur_tags:
4804 f27302fa Iustin Pop
      diff_tags = del_tags - cur_tags
4805 f27302fa Iustin Pop
      diff_names = ["'%s'" % tag for tag in diff_tags]
4806 f27302fa Iustin Pop
      diff_names.sort()
4807 f27302fa Iustin Pop
      raise errors.OpPrereqError("Tag(s) %s not found" %
4808 f27302fa Iustin Pop
                                 (",".join(diff_names)))
4809 5c947f38 Iustin Pop
4810 5c947f38 Iustin Pop
  def Exec(self, feedback_fn):
4811 5c947f38 Iustin Pop
    """Remove the tag from the object.
4812 5c947f38 Iustin Pop

4813 5c947f38 Iustin Pop
    """
4814 f27302fa Iustin Pop
    for tag in self.op.tags:
4815 f27302fa Iustin Pop
      self.target.RemoveTag(tag)
4816 5c947f38 Iustin Pop
    try:
4817 5c947f38 Iustin Pop
      self.cfg.Update(self.target)
4818 5c947f38 Iustin Pop
    except errors.ConfigurationError:
4819 3ecf6786 Iustin Pop
      raise errors.OpRetryError("There has been a modification to the"
4820 3ecf6786 Iustin Pop
                                " config file and the operation has been"
4821 3ecf6786 Iustin Pop
                                " aborted. Please retry.")
4822 06009e27 Iustin Pop
4823 06009e27 Iustin Pop
class LUTestDelay(NoHooksLU):
4824 06009e27 Iustin Pop
  """Sleep for a specified amount of time.
4825 06009e27 Iustin Pop

4826 06009e27 Iustin Pop
  This LU sleeps on the master and/or nodes for a specified amount of
4827 06009e27 Iustin Pop
  time.
4828 06009e27 Iustin Pop

4829 06009e27 Iustin Pop
  """
4830 06009e27 Iustin Pop
  _OP_REQP = ["duration", "on_master", "on_nodes"]
4831 06009e27 Iustin Pop
4832 06009e27 Iustin Pop
  def CheckPrereq(self):
4833 06009e27 Iustin Pop
    """Check prerequisites.
4834 06009e27 Iustin Pop

4835 06009e27 Iustin Pop
    This checks that we have a good list of nodes and/or the duration
4836 06009e27 Iustin Pop
    is valid.
4837 06009e27 Iustin Pop

4838 06009e27 Iustin Pop
    """
4839 06009e27 Iustin Pop
4840 06009e27 Iustin Pop
    if self.op.on_nodes:
4841 06009e27 Iustin Pop
      self.op.on_nodes = _GetWantedNodes(self, self.op.on_nodes)
4842 06009e27 Iustin Pop
4843 06009e27 Iustin Pop
  def Exec(self, feedback_fn):
4844 06009e27 Iustin Pop
    """Do the actual sleep.
4845 06009e27 Iustin Pop

4846 06009e27 Iustin Pop
    """
4847 06009e27 Iustin Pop
    if self.op.on_master:
4848 06009e27 Iustin Pop
      if not utils.TestDelay(self.op.duration):
4849 06009e27 Iustin Pop
        raise errors.OpExecError("Error during master delay test")
4850 06009e27 Iustin Pop
    if self.op.on_nodes:
4851 06009e27 Iustin Pop
      result = rpc.call_test_delay(self.op.on_nodes, self.op.duration)
4852 06009e27 Iustin Pop
      if not result:
4853 06009e27 Iustin Pop
        raise errors.OpExecError("Complete failure from rpc call")
4854 06009e27 Iustin Pop
      for node, node_result in result.items():
4855 06009e27 Iustin Pop
        if not node_result:
4856 06009e27 Iustin Pop
          raise errors.OpExecError("Failure during rpc call to node %s,"
4857 06009e27 Iustin Pop
                                   " result: %s" % (node, node_result))
4858 d61df03e Iustin Pop
4859 d61df03e Iustin Pop
4860 d1c2dd75 Iustin Pop
class IAllocator(object):
4861 d1c2dd75 Iustin Pop
  """IAllocator framework.
4862 d61df03e Iustin Pop

4863 d1c2dd75 Iustin Pop
  An IAllocator instance has several sets of attributes:
4864 d1c2dd75 Iustin Pop
    - cfg/sstore that are needed to query the cluster
4865 d1c2dd75 Iustin Pop
    - input data (all members of the _KEYS class attribute are required)
4866 d1c2dd75 Iustin Pop
    - four buffer attributes (in|out_data|text), that represent the
4867 d1c2dd75 Iustin Pop
      input (to the external script) in text and data structure format,
4868 d1c2dd75 Iustin Pop
      and the output from it, again in two formats
4869 d1c2dd75 Iustin Pop
    - the result variables from the script (success, info, nodes) for
4870 d1c2dd75 Iustin Pop
      easy usage
4871 d61df03e Iustin Pop

4872 d61df03e Iustin Pop
  """
4873 29859cb7 Iustin Pop
  _ALLO_KEYS = [
4874 d1c2dd75 Iustin Pop
    "mem_size", "disks", "disk_template",
4875 d1c2dd75 Iustin Pop
    "os", "tags", "nics", "vcpus",
4876 d1c2dd75 Iustin Pop
    ]
4877 29859cb7 Iustin Pop
  _RELO_KEYS = [
4878 29859cb7 Iustin Pop
    "relocate_from",
4879 29859cb7 Iustin Pop
    ]
4880 d1c2dd75 Iustin Pop
4881 29859cb7 Iustin Pop
  def __init__(self, cfg, sstore, mode, name, **kwargs):
4882 d1c2dd75 Iustin Pop
    self.cfg = cfg
4883 d1c2dd75 Iustin Pop
    self.sstore = sstore
4884 d1c2dd75 Iustin Pop
    # init buffer variables
4885 d1c2dd75 Iustin Pop
    self.in_text = self.out_text = self.in_data = self.out_data = None
4886 d1c2dd75 Iustin Pop
    # init all input fields so that pylint is happy
4887 29859cb7 Iustin Pop
    self.mode = mode
4888 29859cb7 Iustin Pop
    self.name = name
4889 d1c2dd75 Iustin Pop
    self.mem_size = self.disks = self.disk_template = None
4890 d1c2dd75 Iustin Pop
    self.os = self.tags = self.nics = self.vcpus = None
4891 29859cb7 Iustin Pop
    self.relocate_from = None
4892 27579978 Iustin Pop
    # computed fields
4893 27579978 Iustin Pop
    self.required_nodes = None
4894 d1c2dd75 Iustin Pop
    # init result fields
4895 d1c2dd75 Iustin Pop
    self.success = self.info = self.nodes = None
4896 29859cb7 Iustin Pop
    if self.mode == constants.IALLOCATOR_MODE_ALLOC:
4897 29859cb7 Iustin Pop
      keyset = self._ALLO_KEYS
4898 29859cb7 Iustin Pop
    elif self.mode == constants.IALLOCATOR_MODE_RELOC:
4899 29859cb7 Iustin Pop
      keyset = self._RELO_KEYS
4900 29859cb7 Iustin Pop
    else:
4901 29859cb7 Iustin Pop
      raise errors.ProgrammerError("Unknown mode '%s' passed to the"
4902 29859cb7 Iustin Pop
                                   " IAllocator" % self.mode)
4903 d1c2dd75 Iustin Pop
    for key in kwargs:
4904 29859cb7 Iustin Pop
      if key not in keyset:
4905 d1c2dd75 Iustin Pop
        raise errors.ProgrammerError("Invalid input parameter '%s' to"
4906 d1c2dd75 Iustin Pop
                                     " IAllocator" % key)
4907 d1c2dd75 Iustin Pop
      setattr(self, key, kwargs[key])
4908 29859cb7 Iustin Pop
    for key in keyset:
4909 d1c2dd75 Iustin Pop
      if key not in kwargs:
4910 d1c2dd75 Iustin Pop
        raise errors.ProgrammerError("Missing input parameter '%s' to"
4911 d1c2dd75 Iustin Pop
                                     " IAllocator" % key)
4912 d1c2dd75 Iustin Pop
    self._BuildInputData()
4913 d1c2dd75 Iustin Pop
4914 d1c2dd75 Iustin Pop
  def _ComputeClusterData(self):
4915 d1c2dd75 Iustin Pop
    """Compute the generic allocator input data.
4916 d1c2dd75 Iustin Pop

4917 d1c2dd75 Iustin Pop
    This is the data that is independent of the actual operation.
4918 d1c2dd75 Iustin Pop

4919 d1c2dd75 Iustin Pop
    """
4920 d1c2dd75 Iustin Pop
    cfg = self.cfg
4921 d1c2dd75 Iustin Pop
    # cluster data
4922 d1c2dd75 Iustin Pop
    data = {
4923 d1c2dd75 Iustin Pop
      "version": 1,
4924 d1c2dd75 Iustin Pop
      "cluster_name": self.sstore.GetClusterName(),
4925 d1c2dd75 Iustin Pop
      "cluster_tags": list(cfg.GetClusterInfo().GetTags()),
4926 6286519f Iustin Pop
      "hypervisor_type": self.sstore.GetHypervisorType(),
4927 d1c2dd75 Iustin Pop
      # we don't have job IDs
4928 d61df03e Iustin Pop
      }
4929 d61df03e Iustin Pop
4930 6286519f Iustin Pop
    i_list = [cfg.GetInstanceInfo(iname) for iname in cfg.GetInstanceList()]
4931 6286519f Iustin Pop
4932 d1c2dd75 Iustin Pop
    # node data
4933 d1c2dd75 Iustin Pop
    node_results = {}
4934 d1c2dd75 Iustin Pop
    node_list = cfg.GetNodeList()
4935 d1c2dd75 Iustin Pop
    node_data = rpc.call_node_info(node_list, cfg.GetVGName())
4936 d1c2dd75 Iustin Pop
    for nname in node_list:
4937 d1c2dd75 Iustin Pop
      ninfo = cfg.GetNodeInfo(nname)
4938 d1c2dd75 Iustin Pop
      if nname not in node_data or not isinstance(node_data[nname], dict):
4939 d1c2dd75 Iustin Pop
        raise errors.OpExecError("Can't get data for node %s" % nname)
4940 d1c2dd75 Iustin Pop
      remote_info = node_data[nname]
4941 b2662e7f Iustin Pop
      for attr in ['memory_total', 'memory_free', 'memory_dom0',
4942 4337cf1b Iustin Pop
                   'vg_size', 'vg_free', 'cpu_total']:
4943 d1c2dd75 Iustin Pop
        if attr not in remote_info:
4944 d1c2dd75 Iustin Pop
          raise errors.OpExecError("Node '%s' didn't return attribute '%s'" %
4945 d1c2dd75 Iustin Pop
                                   (nname, attr))
4946 d1c2dd75 Iustin Pop
        try:
4947 b2662e7f Iustin Pop
          remote_info[attr] = int(remote_info[attr])
4948 d1c2dd75 Iustin Pop
        except ValueError, err:
4949 d1c2dd75 Iustin Pop
          raise errors.OpExecError("Node '%s' returned invalid value for '%s':"
4950 d1c2dd75 Iustin Pop
                                   " %s" % (nname, attr, str(err)))
4951 6286519f Iustin Pop
      # compute memory used by primary instances
4952 6286519f Iustin Pop
      i_p_mem = i_p_up_mem = 0
4953 6286519f Iustin Pop
      for iinfo in i_list:
4954 6286519f Iustin Pop
        if iinfo.primary_node == nname:
4955 6286519f Iustin Pop
          i_p_mem += iinfo.memory
4956 6286519f Iustin Pop
          if iinfo.status == "up":
4957 6286519f Iustin Pop
            i_p_up_mem += iinfo.memory
4958 6286519f Iustin Pop
4959 b2662e7f Iustin Pop
      # compute memory used by instances
4960 d1c2dd75 Iustin Pop
      pnr = {
4961 d1c2dd75 Iustin Pop
        "tags": list(ninfo.GetTags()),
4962 b2662e7f Iustin Pop
        "total_memory": remote_info['memory_total'],
4963 b2662e7f Iustin Pop
        "reserved_memory": remote_info['memory_dom0'],
4964 b2662e7f Iustin Pop
        "free_memory": remote_info['memory_free'],
4965 6286519f Iustin Pop
        "i_pri_memory": i_p_mem,
4966 6286519f Iustin Pop
        "i_pri_up_memory": i_p_up_mem,
4967 b2662e7f Iustin Pop
        "total_disk": remote_info['vg_size'],
4968 b2662e7f Iustin Pop
        "free_disk": remote_info['vg_free'],
4969 d1c2dd75 Iustin Pop
        "primary_ip": ninfo.primary_ip,
4970 d1c2dd75 Iustin Pop
        "secondary_ip": ninfo.secondary_ip,
4971 4337cf1b Iustin Pop
        "total_cpus": remote_info['cpu_total'],
4972 d1c2dd75 Iustin Pop
        }
4973 d1c2dd75 Iustin Pop
      node_results[nname] = pnr
4974 d1c2dd75 Iustin Pop
    data["nodes"] = node_results
4975 d1c2dd75 Iustin Pop
4976 d1c2dd75 Iustin Pop
    # instance data
4977 d1c2dd75 Iustin Pop
    instance_data = {}
4978 6286519f Iustin Pop
    for iinfo in i_list:
4979 d1c2dd75 Iustin Pop
      nic_data = [{"mac": n.mac, "ip": n.ip, "bridge": n.bridge}
4980 d1c2dd75 Iustin Pop
                  for n in iinfo.nics]
4981 d1c2dd75 Iustin Pop
      pir = {
4982 d1c2dd75 Iustin Pop
        "tags": list(iinfo.GetTags()),
4983 d1c2dd75 Iustin Pop
        "should_run": iinfo.status == "up",
4984 d1c2dd75 Iustin Pop
        "vcpus": iinfo.vcpus,
4985 d1c2dd75 Iustin Pop
        "memory": iinfo.memory,
4986 d1c2dd75 Iustin Pop
        "os": iinfo.os,
4987 d1c2dd75 Iustin Pop
        "nodes": [iinfo.primary_node] + list(iinfo.secondary_nodes),
4988 d1c2dd75 Iustin Pop
        "nics": nic_data,
4989 d1c2dd75 Iustin Pop
        "disks": [{"size": dsk.size, "mode": "w"} for dsk in iinfo.disks],
4990 d1c2dd75 Iustin Pop
        "disk_template": iinfo.disk_template,
4991 d1c2dd75 Iustin Pop
        }
4992 768f0a80 Iustin Pop
      instance_data[iinfo.name] = pir
4993 d61df03e Iustin Pop
4994 d1c2dd75 Iustin Pop
    data["instances"] = instance_data
4995 d61df03e Iustin Pop
4996 d1c2dd75 Iustin Pop
    self.in_data = data
4997 d61df03e Iustin Pop
4998 d1c2dd75 Iustin Pop
  def _AddNewInstance(self):
4999 d1c2dd75 Iustin Pop
    """Add new instance data to allocator structure.
5000 d61df03e Iustin Pop

5001 d1c2dd75 Iustin Pop
    This in combination with _ComputeClusterData will create the
5002 d1c2dd75 Iustin Pop
    correct structure needed as input for the allocator.
5003 d61df03e Iustin Pop

5004 d1c2dd75 Iustin Pop
    The checks for the completeness of the opcode must have already been
5005 d1c2dd75 Iustin Pop
    done.
5006 d61df03e Iustin Pop

5007 d1c2dd75 Iustin Pop
    """
5008 d1c2dd75 Iustin Pop
    data = self.in_data
5009 d1c2dd75 Iustin Pop
    if len(self.disks) != 2:
5010 d1c2dd75 Iustin Pop
      raise errors.OpExecError("Only two-disk configurations supported")
5011 d1c2dd75 Iustin Pop
5012 d1c2dd75 Iustin Pop
    disk_space = _ComputeDiskSize(self.disk_template,
5013 d1c2dd75 Iustin Pop
                                  self.disks[0]["size"], self.disks[1]["size"])
5014 d1c2dd75 Iustin Pop
5015 27579978 Iustin Pop
    if self.disk_template in constants.DTS_NET_MIRROR:
5016 27579978 Iustin Pop
      self.required_nodes = 2
5017 27579978 Iustin Pop
    else:
5018 27579978 Iustin Pop
      self.required_nodes = 1
5019 d1c2dd75 Iustin Pop
    request = {
5020 d1c2dd75 Iustin Pop
      "type": "allocate",
5021 d1c2dd75 Iustin Pop
      "name": self.name,
5022 d1c2dd75 Iustin Pop
      "disk_template": self.disk_template,
5023 d1c2dd75 Iustin Pop
      "tags": self.tags,
5024 d1c2dd75 Iustin Pop
      "os": self.os,
5025 d1c2dd75 Iustin Pop
      "vcpus": self.vcpus,
5026 d1c2dd75 Iustin Pop
      "memory": self.mem_size,
5027 d1c2dd75 Iustin Pop
      "disks": self.disks,
5028 d1c2dd75 Iustin Pop
      "disk_space_total": disk_space,
5029 d1c2dd75 Iustin Pop
      "nics": self.nics,
5030 27579978 Iustin Pop
      "required_nodes": self.required_nodes,
5031 d1c2dd75 Iustin Pop
      }
5032 d1c2dd75 Iustin Pop
    data["request"] = request
5033 298fe380 Iustin Pop
5034 d1c2dd75 Iustin Pop
  def _AddRelocateInstance(self):
5035 d1c2dd75 Iustin Pop
    """Add relocate instance data to allocator structure.
5036 298fe380 Iustin Pop

5037 d1c2dd75 Iustin Pop
    This in combination with _ComputeClusterData will create the
5038 d1c2dd75 Iustin Pop
    correct structure needed as input for the allocator.
5039 d61df03e Iustin Pop

5040 d1c2dd75 Iustin Pop
    The checks for the completeness of the opcode must have already been
5041 d1c2dd75 Iustin Pop
    done.
5042 d61df03e Iustin Pop

5043 d1c2dd75 Iustin Pop
    """
5044 27579978 Iustin Pop
    instance = self.cfg.GetInstanceInfo(self.name)
5045 27579978 Iustin Pop
    if instance is None:
5046 27579978 Iustin Pop
      raise errors.ProgrammerError("Unknown instance '%s' passed to"
5047 27579978 Iustin Pop
                                   " IAllocator" % self.name)
5048 27579978 Iustin Pop
5049 27579978 Iustin Pop
    if instance.disk_template not in constants.DTS_NET_MIRROR:
5050 27579978 Iustin Pop
      raise errors.OpPrereqError("Can't relocate non-mirrored instances")
5051 27579978 Iustin Pop
5052 2a139bb0 Iustin Pop
    if len(instance.secondary_nodes) != 1:
5053 2a139bb0 Iustin Pop
      raise errors.OpPrereqError("Instance has not exactly one secondary node")
5054 2a139bb0 Iustin Pop
5055 27579978 Iustin Pop
    self.required_nodes = 1
5056 27579978 Iustin Pop
5057 27579978 Iustin Pop
    disk_space = _ComputeDiskSize(instance.disk_template,
5058 27579978 Iustin Pop
                                  instance.disks[0].size,
5059 27579978 Iustin Pop
                                  instance.disks[1].size)
5060 27579978 Iustin Pop
5061 d1c2dd75 Iustin Pop
    request = {
5062 2a139bb0 Iustin Pop
      "type": "relocate",
5063 d1c2dd75 Iustin Pop
      "name": self.name,
5064 27579978 Iustin Pop
      "disk_space_total": disk_space,
5065 27579978 Iustin Pop
      "required_nodes": self.required_nodes,
5066 29859cb7 Iustin Pop
      "relocate_from": self.relocate_from,
5067 d1c2dd75 Iustin Pop
      }
5068 27579978 Iustin Pop
    self.in_data["request"] = request
5069 d61df03e Iustin Pop
5070 d1c2dd75 Iustin Pop
  def _BuildInputData(self):
5071 d1c2dd75 Iustin Pop
    """Build input data structures.
5072 d61df03e Iustin Pop

5073 d1c2dd75 Iustin Pop
    """
5074 d1c2dd75 Iustin Pop
    self._ComputeClusterData()
5075 d61df03e Iustin Pop
5076 d1c2dd75 Iustin Pop
    if self.mode == constants.IALLOCATOR_MODE_ALLOC:
5077 d1c2dd75 Iustin Pop
      self._AddNewInstance()
5078 d1c2dd75 Iustin Pop
    else:
5079 d1c2dd75 Iustin Pop
      self._AddRelocateInstance()
5080 d61df03e Iustin Pop
5081 d1c2dd75 Iustin Pop
    self.in_text = serializer.Dump(self.in_data)
5082 d61df03e Iustin Pop
5083 8d528b7c Iustin Pop
  def Run(self, name, validate=True, call_fn=rpc.call_iallocator_runner):
5084 d1c2dd75 Iustin Pop
    """Run an instance allocator and return the results.
5085 298fe380 Iustin Pop

5086 d1c2dd75 Iustin Pop
    """
5087 d1c2dd75 Iustin Pop
    data = self.in_text
5088 298fe380 Iustin Pop
5089 8d528b7c Iustin Pop
    result = call_fn(self.sstore.GetMasterNode(), name, self.in_text)
5090 298fe380 Iustin Pop
5091 8d528b7c Iustin Pop
    if not isinstance(result, tuple) or len(result) != 4:
5092 8d528b7c Iustin Pop
      raise errors.OpExecError("Invalid result from master iallocator runner")
5093 8d528b7c Iustin Pop
5094 8d528b7c Iustin Pop
    rcode, stdout, stderr, fail = result
5095 8d528b7c Iustin Pop
5096 8d528b7c Iustin Pop
    if rcode == constants.IARUN_NOTFOUND:
5097 8d528b7c Iustin Pop
      raise errors.OpExecError("Can't find allocator '%s'" % name)
5098 8d528b7c Iustin Pop
    elif rcode == constants.IARUN_FAILURE:
5099 d1c2dd75 Iustin Pop
        raise errors.OpExecError("Instance allocator call failed: %s,"
5100 d1c2dd75 Iustin Pop
                                 " output: %s" %
5101 8d528b7c Iustin Pop
                               (fail, stdout+stderr))
5102 8d528b7c Iustin Pop
    self.out_text = stdout
5103 d1c2dd75 Iustin Pop
    if validate:
5104 d1c2dd75 Iustin Pop
      self._ValidateResult()
5105 298fe380 Iustin Pop
5106 d1c2dd75 Iustin Pop
  def _ValidateResult(self):
5107 d1c2dd75 Iustin Pop
    """Process the allocator results.
5108 538475ca Iustin Pop

5109 d1c2dd75 Iustin Pop
    This will process and if successful save the result in
5110 d1c2dd75 Iustin Pop
    self.out_data and the other parameters.
5111 538475ca Iustin Pop

5112 d1c2dd75 Iustin Pop
    """
5113 d1c2dd75 Iustin Pop
    try:
5114 d1c2dd75 Iustin Pop
      rdict = serializer.Load(self.out_text)
5115 d1c2dd75 Iustin Pop
    except Exception, err:
5116 d1c2dd75 Iustin Pop
      raise errors.OpExecError("Can't parse iallocator results: %s" % str(err))
5117 d1c2dd75 Iustin Pop
5118 d1c2dd75 Iustin Pop
    if not isinstance(rdict, dict):
5119 d1c2dd75 Iustin Pop
      raise errors.OpExecError("Can't parse iallocator results: not a dict")
5120 538475ca Iustin Pop
5121 d1c2dd75 Iustin Pop
    for key in "success", "info", "nodes":
5122 d1c2dd75 Iustin Pop
      if key not in rdict:
5123 d1c2dd75 Iustin Pop
        raise errors.OpExecError("Can't parse iallocator results:"
5124 d1c2dd75 Iustin Pop
                                 " missing key '%s'" % key)
5125 d1c2dd75 Iustin Pop
      setattr(self, key, rdict[key])
5126 538475ca Iustin Pop
5127 d1c2dd75 Iustin Pop
    if not isinstance(rdict["nodes"], list):
5128 d1c2dd75 Iustin Pop
      raise errors.OpExecError("Can't parse iallocator results: 'nodes' key"
5129 d1c2dd75 Iustin Pop
                               " is not a list")
5130 d1c2dd75 Iustin Pop
    self.out_data = rdict
5131 538475ca Iustin Pop
5132 538475ca Iustin Pop
5133 d61df03e Iustin Pop
class LUTestAllocator(NoHooksLU):
5134 d61df03e Iustin Pop
  """Run allocator tests.
5135 d61df03e Iustin Pop

5136 d61df03e Iustin Pop
  This LU runs the allocator tests
5137 d61df03e Iustin Pop

5138 d61df03e Iustin Pop
  """
5139 d61df03e Iustin Pop
  _OP_REQP = ["direction", "mode", "name"]
5140 d61df03e Iustin Pop
5141 d61df03e Iustin Pop
  def CheckPrereq(self):
5142 d61df03e Iustin Pop
    """Check prerequisites.
5143 d61df03e Iustin Pop

5144 d61df03e Iustin Pop
    This checks the opcode parameters depending on the direction and mode.
5145 d61df03e Iustin Pop

5146 d61df03e Iustin Pop
    """
5147 298fe380 Iustin Pop
    if self.op.mode == constants.IALLOCATOR_MODE_ALLOC:
5148 d61df03e Iustin Pop
      for attr in ["name", "mem_size", "disks", "disk_template",
5149 d61df03e Iustin Pop
                   "os", "tags", "nics", "vcpus"]:
5150 d61df03e Iustin Pop
        if not hasattr(self.op, attr):
5151 d61df03e Iustin Pop
          raise errors.OpPrereqError("Missing attribute '%s' on opcode input" %
5152 d61df03e Iustin Pop
                                     attr)
5153 d61df03e Iustin Pop
      iname = self.cfg.ExpandInstanceName(self.op.name)
5154 d61df03e Iustin Pop
      if iname is not None:
5155 d61df03e Iustin Pop
        raise errors.OpPrereqError("Instance '%s' already in the cluster" %
5156 d61df03e Iustin Pop
                                   iname)
5157 d61df03e Iustin Pop
      if not isinstance(self.op.nics, list):
5158 d61df03e Iustin Pop
        raise errors.OpPrereqError("Invalid parameter 'nics'")
5159 d61df03e Iustin Pop
      for row in self.op.nics:
5160 d61df03e Iustin Pop
        if (not isinstance(row, dict) or
5161 d61df03e Iustin Pop
            "mac" not in row or
5162 d61df03e Iustin Pop
            "ip" not in row or
5163 d61df03e Iustin Pop
            "bridge" not in row):
5164 d61df03e Iustin Pop
          raise errors.OpPrereqError("Invalid contents of the"
5165 d61df03e Iustin Pop
                                     " 'nics' parameter")
5166 d61df03e Iustin Pop
      if not isinstance(self.op.disks, list):
5167 d61df03e Iustin Pop
        raise errors.OpPrereqError("Invalid parameter 'disks'")
5168 298fe380 Iustin Pop
      if len(self.op.disks) != 2:
5169 298fe380 Iustin Pop
        raise errors.OpPrereqError("Only two-disk configurations supported")
5170 d61df03e Iustin Pop
      for row in self.op.disks:
5171 d61df03e Iustin Pop
        if (not isinstance(row, dict) or
5172 d61df03e Iustin Pop
            "size" not in row or
5173 d61df03e Iustin Pop
            not isinstance(row["size"], int) or
5174 d61df03e Iustin Pop
            "mode" not in row or
5175 d61df03e Iustin Pop
            row["mode"] not in ['r', 'w']):
5176 d61df03e Iustin Pop
          raise errors.OpPrereqError("Invalid contents of the"
5177 d61df03e Iustin Pop
                                     " 'disks' parameter")
5178 298fe380 Iustin Pop
    elif self.op.mode == constants.IALLOCATOR_MODE_RELOC:
5179 d61df03e Iustin Pop
      if not hasattr(self.op, "name"):
5180 d61df03e Iustin Pop
        raise errors.OpPrereqError("Missing attribute 'name' on opcode input")
5181 d61df03e Iustin Pop
      fname = self.cfg.ExpandInstanceName(self.op.name)
5182 d61df03e Iustin Pop
      if fname is None:
5183 d61df03e Iustin Pop
        raise errors.OpPrereqError("Instance '%s' not found for relocation" %
5184 d61df03e Iustin Pop
                                   self.op.name)
5185 d61df03e Iustin Pop
      self.op.name = fname
5186 29859cb7 Iustin Pop
      self.relocate_from = self.cfg.GetInstanceInfo(fname).secondary_nodes
5187 d61df03e Iustin Pop
    else:
5188 d61df03e Iustin Pop
      raise errors.OpPrereqError("Invalid test allocator mode '%s'" %
5189 d61df03e Iustin Pop
                                 self.op.mode)
5190 d61df03e Iustin Pop
5191 298fe380 Iustin Pop
    if self.op.direction == constants.IALLOCATOR_DIR_OUT:
5192 298fe380 Iustin Pop
      if not hasattr(self.op, "allocator") or self.op.allocator is None:
5193 d61df03e Iustin Pop
        raise errors.OpPrereqError("Missing allocator name")
5194 298fe380 Iustin Pop
    elif self.op.direction != constants.IALLOCATOR_DIR_IN:
5195 d61df03e Iustin Pop
      raise errors.OpPrereqError("Wrong allocator test '%s'" %
5196 d61df03e Iustin Pop
                                 self.op.direction)
5197 d61df03e Iustin Pop
5198 d61df03e Iustin Pop
  def Exec(self, feedback_fn):
5199 d61df03e Iustin Pop
    """Run the allocator test.
5200 d61df03e Iustin Pop

5201 d61df03e Iustin Pop
    """
5202 29859cb7 Iustin Pop
    if self.op.mode == constants.IALLOCATOR_MODE_ALLOC:
5203 29859cb7 Iustin Pop
      ial = IAllocator(self.cfg, self.sstore,
5204 29859cb7 Iustin Pop
                       mode=self.op.mode,
5205 29859cb7 Iustin Pop
                       name=self.op.name,
5206 29859cb7 Iustin Pop
                       mem_size=self.op.mem_size,
5207 29859cb7 Iustin Pop
                       disks=self.op.disks,
5208 29859cb7 Iustin Pop
                       disk_template=self.op.disk_template,
5209 29859cb7 Iustin Pop
                       os=self.op.os,
5210 29859cb7 Iustin Pop
                       tags=self.op.tags,
5211 29859cb7 Iustin Pop
                       nics=self.op.nics,
5212 29859cb7 Iustin Pop
                       vcpus=self.op.vcpus,
5213 29859cb7 Iustin Pop
                       )
5214 29859cb7 Iustin Pop
    else:
5215 29859cb7 Iustin Pop
      ial = IAllocator(self.cfg, self.sstore,
5216 29859cb7 Iustin Pop
                       mode=self.op.mode,
5217 29859cb7 Iustin Pop
                       name=self.op.name,
5218 29859cb7 Iustin Pop
                       relocate_from=list(self.relocate_from),
5219 29859cb7 Iustin Pop
                       )
5220 d61df03e Iustin Pop
5221 298fe380 Iustin Pop
    if self.op.direction == constants.IALLOCATOR_DIR_IN:
5222 d1c2dd75 Iustin Pop
      result = ial.in_text
5223 298fe380 Iustin Pop
    else:
5224 d1c2dd75 Iustin Pop
      ial.Run(self.op.allocator, validate=False)
5225 d1c2dd75 Iustin Pop
      result = ial.out_text
5226 298fe380 Iustin Pop
    return result