#
#

# Copyright (C) 2006, 2007, 2008 Google Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA.


"""Module implementing the master-side code."""

# pylint: disable-msg=W0613,W0201

import os
import os.path
import sha
import time
import tempfile
import re
import platform

from ganeti import rpc
from ganeti import ssh
from ganeti import logger
from ganeti import utils
from ganeti import errors
from ganeti import hypervisor
from ganeti import config
from ganeti import constants
from ganeti import objects
from ganeti import opcodes
from ganeti import ssconf
from ganeti import serializer


class LogicalUnit(object):
  """Logical Unit base class.

  Subclasses must follow these rules:
    - implement CheckPrereq which also fills in the opcode instance
      with all the fields (even if as None)
    - implement Exec
    - implement BuildHooksEnv
    - redefine HPATH and HTYPE
    - optionally redefine their run requirements (REQ_MASTER); note that all
      commands require root permissions

  """
  HPATH = None
  HTYPE = None
  _OP_REQP = []
  REQ_MASTER = True

  def __init__(self, processor, op, cfg, sstore):
    """Constructor for LogicalUnit.

    This needs to be overridden in derived classes in order to check op
    validity.

    """
    self.proc = processor
    self.op = op
    self.cfg = cfg
    self.sstore = sstore
    self.__ssh = None

    for attr_name in self._OP_REQP:
      attr_val = getattr(op, attr_name, None)
      if attr_val is None:
        raise errors.OpPrereqError("Required parameter '%s' missing" %
                                   attr_name)

    if not cfg.IsCluster():
      raise errors.OpPrereqError("Cluster not initialized yet,"
                                 " use 'gnt-cluster init' first.")
    if self.REQ_MASTER:
      master = sstore.GetMasterNode()
      if master != utils.HostInfo().name:
        raise errors.OpPrereqError("Commands must be run on the master"
                                   " node %s" % master)

  def __GetSSH(self):
    """Returns the SshRunner object

    """
    if not self.__ssh:
      self.__ssh = ssh.SshRunner(self.sstore)
    return self.__ssh

  ssh = property(fget=__GetSSH)

  def CheckPrereq(self):
    """Check prerequisites for this LU.

    This method should check that the prerequisites for the execution
    of this LU are fulfilled. It can do internode communication, but
    it should be idempotent - no cluster or system changes are
    allowed.

    The method should raise errors.OpPrereqError in case something is
    not fulfilled. Its return value is ignored.

    This method should also update all the parameters of the opcode to
    their canonical form; e.g. a short node name must be fully
    expanded after this method has successfully completed (so that
    hooks, logging, etc. work correctly).

    """
    raise NotImplementedError

  def Exec(self, feedback_fn):
    """Execute the LU.

    This method should implement the actual work. It should raise
    errors.OpExecError for failures that are somewhat dealt with in
    code, or expected.

    """
    raise NotImplementedError

  def BuildHooksEnv(self):
    """Build hooks environment for this LU.

    This method should return a three-element tuple consisting of: a dict
    containing the environment that will be used for running the
    specific hook for this LU, a list of node names on which the hook
    should run before the execution, and a list of node names on which
    the hook should run after the execution.

    The keys of the dict must not have 'GANETI_' prefixed as this will
    be handled in the hooks runner. Also note additional keys will be
    added by the hooks runner. If the LU doesn't define any
    environment, an empty dict (and not None) should be returned.

    If there are no nodes to return, an empty list (and not None)
    should be returned.

    Note that if the HPATH for a LU class is None, this function will
    not be called.

    """
    raise NotImplementedError

  def HooksCallBack(self, phase, hook_results, feedback_fn, lu_result):
    """Notify the LU about the results of its hooks.

    This method is called every time a hooks phase is executed, and notifies
    the Logical Unit about the hooks' result. The LU can then use it to alter
    its result based on the hooks.  By default the method does nothing and the
    previous result is passed back unchanged, but any LU can override it if it
    wants to use the local cluster hook-scripts somehow.

    Args:
      phase: the hooks phase that has just been run
      hook_results: the results of the multi-node hooks rpc call
      feedback_fn: function to send feedback back to the caller
      lu_result: the previous result this LU had, or None in the PRE phase.

    """
    return lu_result


class NoHooksLU(LogicalUnit):
  """Simple LU which runs no hooks.

  This LU is intended as a parent for other LogicalUnits which will
  run no hooks, in order to reduce duplicate code.

  """
  HPATH = None
  HTYPE = None


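# Illustrative sketch: a minimal LogicalUnit subclass following the rules
# described in the LogicalUnit docstring above.  The opcode field
# ("instance_name") and the behaviour are hypothetical examples, not a real
# Ganeti opcode; the sketch only shows where parameter canonicalization and
# the actual work belong.
#
#   class LUExampleNoop(NoHooksLU):
#     """Example LU that only canonicalizes its single parameter."""
#     _OP_REQP = ["instance_name"]
#
#     def CheckPrereq(self):
#       # expand/validate parameters; raise OpPrereqError on problems
#       name = self.cfg.ExpandInstanceName(self.op.instance_name)
#       if name is None:
#         raise errors.OpPrereqError("No such instance '%s'" %
#                                    self.op.instance_name)
#       self.op.instance_name = name
#
#     def Exec(self, feedback_fn):
#       feedback_fn("Nothing to do for %s" % self.op.instance_name)

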
def _AddHostToEtcHosts(hostname):
  """Wrapper around utils.SetEtcHostsEntry.

  """
  hi = utils.HostInfo(name=hostname)
  utils.SetEtcHostsEntry(constants.ETC_HOSTS, hi.ip, hi.name, [hi.ShortName()])


def _RemoveHostFromEtcHosts(hostname):
  """Wrapper around utils.RemoveEtcHostsEntry.

  """
  hi = utils.HostInfo(name=hostname)
  utils.RemoveEtcHostsEntry(constants.ETC_HOSTS, hi.name)
  utils.RemoveEtcHostsEntry(constants.ETC_HOSTS, hi.ShortName())


def _GetWantedNodes(lu, nodes):
  """Returns list of checked and expanded node names.

  Args:
    nodes: List of nodes (strings) or None for all

  """
  if not isinstance(nodes, list):
    raise errors.OpPrereqError("Invalid argument type 'nodes'")

  if nodes:
    wanted = []

    for name in nodes:
      node = lu.cfg.ExpandNodeName(name)
      if node is None:
        raise errors.OpPrereqError("No such node name '%s'" % name)
      wanted.append(node)

  else:
    wanted = lu.cfg.GetNodeList()
  return utils.NiceSort(wanted)


def _GetWantedInstances(lu, instances):
  """Returns list of checked and expanded instance names.

  Args:
    instances: List of instances (strings) or None for all

  """
  if not isinstance(instances, list):
    raise errors.OpPrereqError("Invalid argument type 'instances'")

  if instances:
    wanted = []

    for name in instances:
      instance = lu.cfg.ExpandInstanceName(name)
      if instance is None:
        raise errors.OpPrereqError("No such instance name '%s'" % name)
      wanted.append(instance)

  else:
    wanted = lu.cfg.GetInstanceList()
  return utils.NiceSort(wanted)


def _CheckOutputFields(static, dynamic, selected):
  """Checks whether all selected fields are valid.

  Args:
    static: Static fields
    dynamic: Dynamic fields

  """
  static_fields = frozenset(static)
  dynamic_fields = frozenset(dynamic)

  all_fields = static_fields | dynamic_fields

  if not all_fields.issuperset(selected):
    raise errors.OpPrereqError("Unknown output fields selected: %s"
                               % ",".join(frozenset(selected).
                                          difference(all_fields)))


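# Illustrative usage sketch for _CheckOutputFields; the field names below are
# hypothetical and only show the convention (unknown fields raise
# errors.OpPrereqError, valid selections pass silently):
#
#   _CheckOutputFields(static=["name", "pnode"],
#                      dynamic=["oper_state", "oper_ram"],
#                      selected=["name", "oper_state"])   # passes
#   _CheckOutputFields(static=["name", "pnode"],
#                      dynamic=["oper_state", "oper_ram"],
#                      selected=["name", "bogus"])        # raises OpPrereqError

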
def _BuildInstanceHookEnv(name, primary_node, secondary_nodes, os_type, status,
                          memory, vcpus, nics):
  """Builds instance related env variables for hooks from single variables.

  Args:
    secondary_nodes: List of secondary nodes as strings

  """
  env = {
    "OP_TARGET": name,
    "INSTANCE_NAME": name,
    "INSTANCE_PRIMARY": primary_node,
    "INSTANCE_SECONDARIES": " ".join(secondary_nodes),
    "INSTANCE_OS_TYPE": os_type,
    "INSTANCE_STATUS": status,
    "INSTANCE_MEMORY": memory,
    "INSTANCE_VCPUS": vcpus,
  }

  if nics:
    nic_count = len(nics)
    for idx, (ip, bridge, mac) in enumerate(nics):
      if ip is None:
        ip = ""
      env["INSTANCE_NIC%d_IP" % idx] = ip
      env["INSTANCE_NIC%d_BRIDGE" % idx] = bridge
      env["INSTANCE_NIC%d_HWADDR" % idx] = mac
  else:
    nic_count = 0

  env["INSTANCE_NIC_COUNT"] = nic_count

  return env


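# Illustrative sketch of the environment produced by _BuildInstanceHookEnv;
# the instance and node names are hypothetical.  A call with
# name="inst1.example.com", primary_node="node1", secondary_nodes=["node2"],
# os_type="debian-etch", status="up", memory=512, vcpus=1 and
# nics=[("192.0.2.10", "xen-br0", "aa:00:00:11:22:33")] yields keys such as:
#
#   OP_TARGET=inst1.example.com    INSTANCE_PRIMARY=node1
#   INSTANCE_SECONDARIES=node2     INSTANCE_OS_TYPE=debian-etch
#   INSTANCE_STATUS=up             INSTANCE_MEMORY=512
#   INSTANCE_VCPUS=1               INSTANCE_NIC_COUNT=1
#   INSTANCE_NIC0_IP=192.0.2.10    INSTANCE_NIC0_BRIDGE=xen-br0
#   INSTANCE_NIC0_HWADDR=aa:00:00:11:22:33
#
# The hooks runner later adds the "GANETI_" prefix to these keys (see the
# BuildHooksEnv docstring above).

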
def _BuildInstanceHookEnvByObject(instance, override=None):
  """Builds instance related env variables for hooks from an object.

  Args:
    instance: objects.Instance object of instance
    override: dict of values to override

  """
  args = {
    'name': instance.name,
    'primary_node': instance.primary_node,
    'secondary_nodes': instance.secondary_nodes,
    'os_type': instance.os,
    'status': instance.status,
    'memory': instance.memory,
    'vcpus': instance.vcpus,
    'nics': [(nic.ip, nic.bridge, nic.mac) for nic in instance.nics],
  }
  if override:
    args.update(override)
  return _BuildInstanceHookEnv(**args)


def _HasValidVG(vglist, vgname):
  """Checks if the volume group list is valid.

  A non-None return value means there's an error, and the return value
  is the error message.

  """
  vgsize = vglist.get(vgname, None)
  if vgsize is None:
    return "volume group '%s' missing" % vgname
  elif vgsize < 20480:
    return ("volume group '%s' too small (20480MiB required, %dMiB found)" %
            (vgname, vgsize))
  return None


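# Illustrative usage sketch of the _HasValidVG convention (the volume group
# name is hypothetical): callers treat None as success and any string as the
# error message to report, e.g.
#
#   vgstatus = _HasValidVG(utils.ListVolumeGroups(), "xenvg")
#   if vgstatus:
#     raise errors.OpPrereqError(vgstatus)

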
def _InitSSHSetup(node):
  """Set up the SSH configuration for the cluster.

  This generates a dsa keypair for root, adds the pub key to the
  permitted hosts and adds the hostkey to its own known hosts.

  Args:
    node: the name of this host as a fqdn

  """
  priv_key, pub_key, auth_keys = ssh.GetUserFiles(constants.GANETI_RUNAS)

  for name in priv_key, pub_key:
    if os.path.exists(name):
      utils.CreateBackup(name)
    utils.RemoveFile(name)

  result = utils.RunCmd(["ssh-keygen", "-t", "dsa",
                         "-f", priv_key,
                         "-q", "-N", ""])
  if result.failed:
    raise errors.OpExecError("Could not generate ssh keypair, error %s" %
                             result.output)

  f = open(pub_key, 'r')
  try:
    utils.AddAuthorizedKey(auth_keys, f.read(8192))
  finally:
    f.close()


def _InitGanetiServerSetup(ss):
  """Set up the necessary configuration for the initial node daemon.

  This creates the nodepass file containing the shared password for
  the cluster and also generates the SSL certificate.

  """
  # Create pseudo random password
  randpass = sha.new(os.urandom(64)).hexdigest()
  # and write it into sstore
  ss.SetKey(ss.SS_NODED_PASS, randpass)

  result = utils.RunCmd(["openssl", "req", "-new", "-newkey", "rsa:1024",
                         "-days", str(365*5), "-nodes", "-x509",
                         "-keyout", constants.SSL_CERT_FILE,
                         "-out", constants.SSL_CERT_FILE, "-batch"])
  if result.failed:
    raise errors.OpExecError("could not generate server ssl cert, command"
                             " %s had exitcode %s and error message %s" %
                             (result.cmd, result.exit_code, result.output))

  os.chmod(constants.SSL_CERT_FILE, 0400)

  result = utils.RunCmd([constants.NODE_INITD_SCRIPT, "restart"])

  if result.failed:
    raise errors.OpExecError("Could not start the node daemon, command %s"
                             " had exitcode %s and error %s" %
                             (result.cmd, result.exit_code, result.output))


def _CheckInstanceBridgesExist(instance):
  """Check that the bridges needed by an instance exist.

  """
  # check bridges existence
  brlist = [nic.bridge for nic in instance.nics]
  if not rpc.call_bridges_exist(instance.primary_node, brlist):
    raise errors.OpPrereqError("one or more target bridges %s do not"
                               " exist on destination node '%s'" %
                               (brlist, instance.primary_node))


class LUInitCluster(LogicalUnit):
  """Initialise the cluster.

  """
  HPATH = "cluster-init"
  HTYPE = constants.HTYPE_CLUSTER
  _OP_REQP = ["cluster_name", "hypervisor_type", "mac_prefix",
              "def_bridge", "master_netdev", "file_storage_dir"]
  REQ_CLUSTER = False

  def BuildHooksEnv(self):
    """Build hooks env.

    Notes: Since we don't require a cluster, we must manually add
    ourselves in the post-run node list.

    """
    env = {"OP_TARGET": self.op.cluster_name}
    return env, [], [self.hostname.name]

  def CheckPrereq(self):
    """Verify that the passed name is a valid one.

    """
    if config.ConfigWriter.IsCluster():
      raise errors.OpPrereqError("Cluster is already initialised")

    if self.op.hypervisor_type == constants.HT_XEN_HVM31:
      if not os.path.exists(constants.VNC_PASSWORD_FILE):
        raise errors.OpPrereqError("Please prepare the cluster VNC"
                                   " password file %s" %
                                   constants.VNC_PASSWORD_FILE)

    self.hostname = hostname = utils.HostInfo()

    if hostname.ip.startswith("127."):
      raise errors.OpPrereqError("This host's IP resolves to the private"
                                 " range (%s). Please fix DNS or %s." %
                                 (hostname.ip, constants.ETC_HOSTS))

    if not utils.TcpPing(hostname.ip, constants.DEFAULT_NODED_PORT,
                         source=constants.LOCALHOST_IP_ADDRESS):
      raise errors.OpPrereqError("Inconsistency: this host's name resolves"
                                 " to %s,\nbut this ip address does not"
                                 " belong to this host."
                                 " Aborting." % hostname.ip)

    self.clustername = clustername = utils.HostInfo(self.op.cluster_name)

    if utils.TcpPing(clustername.ip, constants.DEFAULT_NODED_PORT,
                     timeout=5):
      raise errors.OpPrereqError("Cluster IP already active. Aborting.")

    secondary_ip = getattr(self.op, "secondary_ip", None)
    if secondary_ip and not utils.IsValidIP(secondary_ip):
      raise errors.OpPrereqError("Invalid secondary ip given")
    if (secondary_ip and
        secondary_ip != hostname.ip and
        (not utils.TcpPing(secondary_ip, constants.DEFAULT_NODED_PORT,
                           source=constants.LOCALHOST_IP_ADDRESS))):
      raise errors.OpPrereqError("You gave %s as secondary IP,"
                                 " but it does not belong to this host." %
                                 secondary_ip)
    self.secondary_ip = secondary_ip

    if not hasattr(self.op, "vg_name"):
      self.op.vg_name = None
    # if vg_name not None, checks if volume group is valid
    if self.op.vg_name:
      vgstatus = _HasValidVG(utils.ListVolumeGroups(), self.op.vg_name)
      if vgstatus:
        raise errors.OpPrereqError("Error: %s\nspecify --no-lvm-storage if"
                                   " you are not using lvm" % vgstatus)

    self.op.file_storage_dir = os.path.normpath(self.op.file_storage_dir)

    if not os.path.isabs(self.op.file_storage_dir):
      raise errors.OpPrereqError("The file storage directory you have given"
                                 " is not an absolute path.")

    if not os.path.exists(self.op.file_storage_dir):
      try:
        os.makedirs(self.op.file_storage_dir, 0750)
      except OSError, err:
        raise errors.OpPrereqError("Cannot create file storage directory"
                                   " '%s': %s" %
                                   (self.op.file_storage_dir, err))

    if not os.path.isdir(self.op.file_storage_dir):
      raise errors.OpPrereqError("The file storage directory '%s' is not"
                                 " a directory." % self.op.file_storage_dir)

    if not re.match("^[0-9a-z]{2}:[0-9a-z]{2}:[0-9a-z]{2}$",
                    self.op.mac_prefix):
      raise errors.OpPrereqError("Invalid mac prefix given '%s'" %
                                 self.op.mac_prefix)

    if self.op.hypervisor_type not in constants.HYPER_TYPES:
      raise errors.OpPrereqError("Invalid hypervisor type given '%s'" %
                                 self.op.hypervisor_type)

    result = utils.RunCmd(["ip", "link", "show", "dev", self.op.master_netdev])
    if result.failed:
      raise errors.OpPrereqError("Invalid master netdev given (%s): '%s'" %
                                 (self.op.master_netdev,
                                  result.output.strip()))

    if not (os.path.isfile(constants.NODE_INITD_SCRIPT) and
            os.access(constants.NODE_INITD_SCRIPT, os.X_OK)):
      raise errors.OpPrereqError("Init.d script '%s' missing or not"
                                 " executable." % constants.NODE_INITD_SCRIPT)

  def Exec(self, feedback_fn):
    """Initialize the cluster.

    """
    clustername = self.clustername
    hostname = self.hostname

    # set up the simple store
    self.sstore = ss = ssconf.SimpleStore()
    ss.SetKey(ss.SS_HYPERVISOR, self.op.hypervisor_type)
    ss.SetKey(ss.SS_MASTER_NODE, hostname.name)
    ss.SetKey(ss.SS_MASTER_IP, clustername.ip)
    ss.SetKey(ss.SS_MASTER_NETDEV, self.op.master_netdev)
    ss.SetKey(ss.SS_CLUSTER_NAME, clustername.name)
    ss.SetKey(ss.SS_FILE_STORAGE_DIR, self.op.file_storage_dir)
    ss.SetKey(ss.SS_CONFIG_VERSION, constants.CONFIG_VERSION)

    # set up the inter-node password and certificate
    _InitGanetiServerSetup(ss)

    # start the master ip
    rpc.call_node_start_master(hostname.name)

    # set up ssh config and /etc/hosts
    f = open(constants.SSH_HOST_RSA_PUB, 'r')
    try:
      sshline = f.read()
    finally:
      f.close()
    sshkey = sshline.split(" ")[1]

    _AddHostToEtcHosts(hostname.name)
    _InitSSHSetup(hostname.name)

    # init of cluster config file
    self.cfg = cfgw = config.ConfigWriter()
    cfgw.InitConfig(hostname.name, hostname.ip, self.secondary_ip,
                    sshkey, self.op.mac_prefix,
                    self.op.vg_name, self.op.def_bridge)

    ssh.WriteKnownHostsFile(cfgw, ss, constants.SSH_KNOWN_HOSTS_FILE)


class LUDestroyCluster(NoHooksLU):
  """Logical unit for destroying the cluster.

  """
  _OP_REQP = []

  def CheckPrereq(self):
    """Check prerequisites.

    This checks whether the cluster is empty.

    Any errors are signalled by raising errors.OpPrereqError.

    """
    master = self.sstore.GetMasterNode()

    nodelist = self.cfg.GetNodeList()
    if len(nodelist) != 1 or nodelist[0] != master:
      raise errors.OpPrereqError("There are still %d node(s) in"
                                 " this cluster." % (len(nodelist) - 1))
    instancelist = self.cfg.GetInstanceList()
    if instancelist:
      raise errors.OpPrereqError("There are still %d instance(s) in"
                                 " this cluster." % len(instancelist))

  def Exec(self, feedback_fn):
    """Destroys the cluster.

    """
    master = self.sstore.GetMasterNode()
    if not rpc.call_node_stop_master(master):
      raise errors.OpExecError("Could not disable the master role")
    priv_key, pub_key, _ = ssh.GetUserFiles(constants.GANETI_RUNAS)
    utils.CreateBackup(priv_key)
    utils.CreateBackup(pub_key)
    rpc.call_node_leave_cluster(master)


class LUVerifyCluster(LogicalUnit):
  """Verifies the cluster status.

  """
  HPATH = "cluster-verify"
  HTYPE = constants.HTYPE_CLUSTER
  _OP_REQP = ["skip_checks"]

  def _VerifyNode(self, node, file_list, local_cksum, vglist, node_result,
                  remote_version, feedback_fn):
    """Run multiple tests against a node.

    Test list:
      - compares ganeti version
      - checks vg existence and size > 20G
      - checks config file checksum
      - checks ssh to other nodes

    Args:
      node: name of the node to check
      file_list: required list of files
      local_cksum: dictionary of local files and their checksums

    """
    # compares ganeti version
    local_version = constants.PROTOCOL_VERSION
    if not remote_version:
      feedback_fn("  - ERROR: connection to %s failed" % (node))
      return True

    if local_version != remote_version:
      feedback_fn("  - ERROR: sw version mismatch: master %s, node(%s) %s" %
                      (local_version, node, remote_version))
      return True

    # checks vg existence and size > 20G

    bad = False
    if not vglist:
      feedback_fn("  - ERROR: unable to check volume groups on node %s." %
                      (node,))
      bad = True
    else:
      vgstatus = _HasValidVG(vglist, self.cfg.GetVGName())
      if vgstatus:
        feedback_fn("  - ERROR: %s on node %s" % (vgstatus, node))
        bad = True

    # checks config file checksum
    # checks ssh to any

    if 'filelist' not in node_result:
      bad = True
      feedback_fn("  - ERROR: node hasn't returned file checksum data")
    else:
      remote_cksum = node_result['filelist']
      for file_name in file_list:
        if file_name not in remote_cksum:
          bad = True
          feedback_fn("  - ERROR: file '%s' missing" % file_name)
        elif remote_cksum[file_name] != local_cksum[file_name]:
          bad = True
          feedback_fn("  - ERROR: file '%s' has wrong checksum" % file_name)

    if 'nodelist' not in node_result:
      bad = True
      feedback_fn("  - ERROR: node hasn't returned node ssh connectivity data")
    else:
      if node_result['nodelist']:
        bad = True
        for node in node_result['nodelist']:
          feedback_fn("  - ERROR: ssh communication with node '%s': %s" %
                          (node, node_result['nodelist'][node]))
    if 'node-net-test' not in node_result:
      bad = True
      feedback_fn("  - ERROR: node hasn't returned node tcp connectivity data")
    else:
      if node_result['node-net-test']:
        bad = True
        nlist = utils.NiceSort(node_result['node-net-test'].keys())
        for node in nlist:
          feedback_fn("  - ERROR: tcp communication with node '%s': %s" %
                          (node, node_result['node-net-test'][node]))

    hyp_result = node_result.get('hypervisor', None)
    if hyp_result is not None:
      feedback_fn("  - ERROR: hypervisor verify failure: '%s'" % hyp_result)
    return bad

  def _VerifyInstance(self, instance, instanceconfig, node_vol_is,
                      node_instance, feedback_fn):
    """Verify an instance.

    This function checks to see if the required block devices are
    available on the instance's node.

    """
    bad = False

    node_current = instanceconfig.primary_node

    node_vol_should = {}
    instanceconfig.MapLVsByNode(node_vol_should)

    for node in node_vol_should:
      for volume in node_vol_should[node]:
        if node not in node_vol_is or volume not in node_vol_is[node]:
          feedback_fn("  - ERROR: volume %s missing on node %s" %
                          (volume, node))
          bad = True

    if not instanceconfig.status == 'down':
      if (node_current not in node_instance or
          not instance in node_instance[node_current]):
        feedback_fn("  - ERROR: instance %s not running on node %s" %
                        (instance, node_current))
        bad = True

    for node in node_instance:
      if (not node == node_current):
        if instance in node_instance[node]:
          feedback_fn("  - ERROR: instance %s should not run on node %s" %
                          (instance, node))
          bad = True

    return bad

  def _VerifyOrphanVolumes(self, node_vol_should, node_vol_is, feedback_fn):
    """Verify if there are any unknown volumes in the cluster.

    The .os, .swap and backup volumes are ignored. All other volumes are
    reported as unknown.

    """
    bad = False

    for node in node_vol_is:
      for volume in node_vol_is[node]:
        if node not in node_vol_should or volume not in node_vol_should[node]:
          feedback_fn("  - ERROR: volume %s on node %s should not exist" %
                      (volume, node))
          bad = True
    return bad

  def _VerifyOrphanInstances(self, instancelist, node_instance, feedback_fn):
    """Verify the list of running instances.

    This checks what instances are running but unknown to the cluster.

    """
    bad = False
    for node in node_instance:
      for runninginstance in node_instance[node]:
        if runninginstance not in instancelist:
          feedback_fn("  - ERROR: instance %s on node %s should not exist" %
                          (runninginstance, node))
          bad = True
    return bad

  def _VerifyNPlusOneMemory(self, node_info, instance_cfg, feedback_fn):
    """Verify N+1 Memory Resilience.

    Check that if one single node dies we can still start all the instances it
    was primary for.

    """
    bad = False

    for node, nodeinfo in node_info.iteritems():
      # This code checks that every node which is now listed as secondary has
      # enough memory to host all instances it is supposed to should a single
      # other node in the cluster fail.
      # FIXME: not ready for failover to an arbitrary node
      # FIXME: does not support file-backed instances
      # WARNING: we currently take into account down instances as well as up
      # ones, considering that even if they're down someone might want to start
      # them even in the event of a node failure.
      for prinode, instances in nodeinfo['sinst-by-pnode'].iteritems():
        needed_mem = 0
        for instance in instances:
          needed_mem += instance_cfg[instance].memory
        if nodeinfo['mfree'] < needed_mem:
          feedback_fn("  - ERROR: not enough memory on node %s to accommodate"
                      " failovers should node %s fail" % (node, prinode))
          bad = True
    return bad

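  # Illustrative worked example of the N+1 check above (node names and sizes
  # are hypothetical): if node2 has mfree=2048 and is secondary for instances
  # whose primary is node1 with memory sizes 1024 and 1536, then
  # needed_mem = 1024 + 1536 = 2560 > 2048, so the check reports that node2
  # cannot accommodate failovers should node1 fail.
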
796 a8083063 Iustin Pop
  def CheckPrereq(self):
797 a8083063 Iustin Pop
    """Check prerequisites.
798 a8083063 Iustin Pop

799 e54c4c5e Guido Trotter
    Transform the list of checks we're going to skip into a set and check that
800 e54c4c5e Guido Trotter
    all its members are valid.
801 a8083063 Iustin Pop

802 a8083063 Iustin Pop
    """
803 e54c4c5e Guido Trotter
    self.skip_set = frozenset(self.op.skip_checks)
804 e54c4c5e Guido Trotter
    if not constants.VERIFY_OPTIONAL_CHECKS.issuperset(self.skip_set):
805 e54c4c5e Guido Trotter
      raise errors.OpPrereqError("Invalid checks to be skipped specified")
806 a8083063 Iustin Pop
807 d8fff41c Guido Trotter
  def BuildHooksEnv(self):
808 d8fff41c Guido Trotter
    """Build hooks env.
809 d8fff41c Guido Trotter

810 d8fff41c Guido Trotter
    Cluster-Verify hooks just rone in the post phase and their failure makes
811 d8fff41c Guido Trotter
    the output be logged in the verify output and the verification to fail.
812 d8fff41c Guido Trotter

813 d8fff41c Guido Trotter
    """
814 d8fff41c Guido Trotter
    all_nodes = self.cfg.GetNodeList()
815 d8fff41c Guido Trotter
    # TODO: populate the environment with useful information for verify hooks
816 d8fff41c Guido Trotter
    env = {}
817 d8fff41c Guido Trotter
    return env, [], all_nodes
818 d8fff41c Guido Trotter
819 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
820 a8083063 Iustin Pop
    """Verify integrity of cluster, performing various test on nodes.
821 a8083063 Iustin Pop

822 a8083063 Iustin Pop
    """
823 a8083063 Iustin Pop
    bad = False
824 a8083063 Iustin Pop
    feedback_fn("* Verifying global settings")
825 8522ceeb Iustin Pop
    for msg in self.cfg.VerifyConfig():
826 8522ceeb Iustin Pop
      feedback_fn("  - ERROR: %s" % msg)
827 a8083063 Iustin Pop
828 a8083063 Iustin Pop
    vg_name = self.cfg.GetVGName()
829 a8083063 Iustin Pop
    nodelist = utils.NiceSort(self.cfg.GetNodeList())
830 9d4bfc96 Iustin Pop
    nodeinfo = [self.cfg.GetNodeInfo(nname) for nname in nodelist]
831 a8083063 Iustin Pop
    instancelist = utils.NiceSort(self.cfg.GetInstanceList())
832 93e4c50b Guido Trotter
    i_non_redundant = [] # Non redundant instances
833 a8083063 Iustin Pop
    node_volume = {}
834 a8083063 Iustin Pop
    node_instance = {}
835 9c9c7d30 Guido Trotter
    node_info = {}
836 26b6af5e Guido Trotter
    instance_cfg = {}
837 a8083063 Iustin Pop
838 a8083063 Iustin Pop
    # FIXME: verify OS list
839 a8083063 Iustin Pop
    # do local checksums
840 cb91d46e Iustin Pop
    file_names = list(self.sstore.GetFileList())
841 cb91d46e Iustin Pop
    file_names.append(constants.SSL_CERT_FILE)
842 cb91d46e Iustin Pop
    file_names.append(constants.CLUSTER_CONF_FILE)
843 a8083063 Iustin Pop
    local_checksums = utils.FingerprintFiles(file_names)
844 a8083063 Iustin Pop
845 a8083063 Iustin Pop
    feedback_fn("* Gathering data (%d nodes)" % len(nodelist))
846 a8083063 Iustin Pop
    all_volumeinfo = rpc.call_volume_list(nodelist, vg_name)
847 a8083063 Iustin Pop
    all_instanceinfo = rpc.call_instance_list(nodelist)
848 a8083063 Iustin Pop
    all_vglist = rpc.call_vg_list(nodelist)
849 a8083063 Iustin Pop
    node_verify_param = {
850 a8083063 Iustin Pop
      'filelist': file_names,
851 a8083063 Iustin Pop
      'nodelist': nodelist,
852 a8083063 Iustin Pop
      'hypervisor': None,
853 9d4bfc96 Iustin Pop
      'node-net-test': [(node.name, node.primary_ip, node.secondary_ip)
854 9d4bfc96 Iustin Pop
                        for node in nodeinfo]
855 a8083063 Iustin Pop
      }
856 a8083063 Iustin Pop
    all_nvinfo = rpc.call_node_verify(nodelist, node_verify_param)
857 a8083063 Iustin Pop
    all_rversion = rpc.call_version(nodelist)
858 9c9c7d30 Guido Trotter
    all_ninfo = rpc.call_node_info(nodelist, self.cfg.GetVGName())
859 a8083063 Iustin Pop
860 a8083063 Iustin Pop
    for node in nodelist:
861 a8083063 Iustin Pop
      feedback_fn("* Verifying node %s" % node)
862 a8083063 Iustin Pop
      result = self._VerifyNode(node, file_names, local_checksums,
863 a8083063 Iustin Pop
                                all_vglist[node], all_nvinfo[node],
864 a8083063 Iustin Pop
                                all_rversion[node], feedback_fn)
865 a8083063 Iustin Pop
      bad = bad or result
866 a8083063 Iustin Pop
867 a8083063 Iustin Pop
      # node_volume
868 a8083063 Iustin Pop
      volumeinfo = all_volumeinfo[node]
869 a8083063 Iustin Pop
870 b63ed789 Iustin Pop
      if isinstance(volumeinfo, basestring):
871 b63ed789 Iustin Pop
        feedback_fn("  - ERROR: LVM problem on node %s: %s" %
872 b63ed789 Iustin Pop
                    (node, volumeinfo[-400:].encode('string_escape')))
873 b63ed789 Iustin Pop
        bad = True
874 b63ed789 Iustin Pop
        node_volume[node] = {}
875 b63ed789 Iustin Pop
      elif not isinstance(volumeinfo, dict):
876 a8083063 Iustin Pop
        feedback_fn("  - ERROR: connection to %s failed" % (node,))
877 a8083063 Iustin Pop
        bad = True
878 a8083063 Iustin Pop
        continue
879 b63ed789 Iustin Pop
      else:
880 b63ed789 Iustin Pop
        node_volume[node] = volumeinfo
881 a8083063 Iustin Pop
882 a8083063 Iustin Pop
      # node_instance
883 a8083063 Iustin Pop
      nodeinstance = all_instanceinfo[node]
884 a8083063 Iustin Pop
      if type(nodeinstance) != list:
885 a8083063 Iustin Pop
        feedback_fn("  - ERROR: connection to %s failed" % (node,))
886 a8083063 Iustin Pop
        bad = True
887 a8083063 Iustin Pop
        continue
888 a8083063 Iustin Pop
889 a8083063 Iustin Pop
      node_instance[node] = nodeinstance
890 a8083063 Iustin Pop
891 9c9c7d30 Guido Trotter
      # node_info
892 9c9c7d30 Guido Trotter
      nodeinfo = all_ninfo[node]
893 9c9c7d30 Guido Trotter
      if not isinstance(nodeinfo, dict):
894 9c9c7d30 Guido Trotter
        feedback_fn("  - ERROR: connection to %s failed" % (node,))
895 9c9c7d30 Guido Trotter
        bad = True
896 9c9c7d30 Guido Trotter
        continue
897 9c9c7d30 Guido Trotter
898 9c9c7d30 Guido Trotter
      try:
899 9c9c7d30 Guido Trotter
        node_info[node] = {
900 9c9c7d30 Guido Trotter
          "mfree": int(nodeinfo['memory_free']),
901 9c9c7d30 Guido Trotter
          "dfree": int(nodeinfo['vg_free']),
902 93e4c50b Guido Trotter
          "pinst": [],
903 93e4c50b Guido Trotter
          "sinst": [],
904 36e7da50 Guido Trotter
          # dictionary holding all instances this node is secondary for,
905 36e7da50 Guido Trotter
          # grouped by their primary node. Each key is a cluster node, and each
906 36e7da50 Guido Trotter
          # value is a list of instances which have the key as primary and the
907 36e7da50 Guido Trotter
          # current node as secondary.  this is handy to calculate N+1 memory
908 36e7da50 Guido Trotter
          # availability if you can only failover from a primary to its
909 36e7da50 Guido Trotter
          # secondary.
910 36e7da50 Guido Trotter
          "sinst-by-pnode": {},
911 9c9c7d30 Guido Trotter
        }
912 9c9c7d30 Guido Trotter
      except ValueError:
913 9c9c7d30 Guido Trotter
        feedback_fn("  - ERROR: invalid value returned from node %s" % (node,))
914 9c9c7d30 Guido Trotter
        bad = True
915 9c9c7d30 Guido Trotter
        continue
916 9c9c7d30 Guido Trotter
917 a8083063 Iustin Pop
    node_vol_should = {}
918 a8083063 Iustin Pop
919 a8083063 Iustin Pop
    for instance in instancelist:
920 a8083063 Iustin Pop
      feedback_fn("* Verifying instance %s" % instance)
921 a8083063 Iustin Pop
      inst_config = self.cfg.GetInstanceInfo(instance)
922 c5705f58 Guido Trotter
      result =  self._VerifyInstance(instance, inst_config, node_volume,
923 c5705f58 Guido Trotter
                                     node_instance, feedback_fn)
924 c5705f58 Guido Trotter
      bad = bad or result
925 a8083063 Iustin Pop
926 a8083063 Iustin Pop
      inst_config.MapLVsByNode(node_vol_should)
927 a8083063 Iustin Pop
928 26b6af5e Guido Trotter
      instance_cfg[instance] = inst_config
929 26b6af5e Guido Trotter
930 93e4c50b Guido Trotter
      pnode = inst_config.primary_node
931 93e4c50b Guido Trotter
      if pnode in node_info:
932 93e4c50b Guido Trotter
        node_info[pnode]['pinst'].append(instance)
933 93e4c50b Guido Trotter
      else:
934 93e4c50b Guido Trotter
        feedback_fn("  - ERROR: instance %s, connection to primary node"
935 93e4c50b Guido Trotter
                    " %s failed" % (instance, pnode))
936 93e4c50b Guido Trotter
        bad = True
937 93e4c50b Guido Trotter
938 93e4c50b Guido Trotter
      # If the instance is non-redundant we cannot survive losing its primary
939 93e4c50b Guido Trotter
      # node, so we are not N+1 compliant. On the other hand we have no disk
940 93e4c50b Guido Trotter
      # templates with more than one secondary so that situation is not well
941 93e4c50b Guido Trotter
      # supported either.
942 93e4c50b Guido Trotter
      # FIXME: does not support file-backed instances
943 93e4c50b Guido Trotter
      if len(inst_config.secondary_nodes) == 0:
944 93e4c50b Guido Trotter
        i_non_redundant.append(instance)
945 93e4c50b Guido Trotter
      elif len(inst_config.secondary_nodes) > 1:
946 93e4c50b Guido Trotter
        feedback_fn("  - WARNING: multiple secondaries for instance %s"
947 93e4c50b Guido Trotter
                    % instance)
948 93e4c50b Guido Trotter
949 93e4c50b Guido Trotter
      for snode in inst_config.secondary_nodes:
950 93e4c50b Guido Trotter
        if snode in node_info:
951 93e4c50b Guido Trotter
          node_info[snode]['sinst'].append(instance)
952 36e7da50 Guido Trotter
          if pnode not in node_info[snode]['sinst-by-pnode']:
953 36e7da50 Guido Trotter
            node_info[snode]['sinst-by-pnode'][pnode] = []
954 36e7da50 Guido Trotter
          node_info[snode]['sinst-by-pnode'][pnode].append(instance)
955 93e4c50b Guido Trotter
        else:
956 93e4c50b Guido Trotter
          feedback_fn("  - ERROR: instance %s, connection to secondary node"
957 93e4c50b Guido Trotter
                      " %s failed" % (instance, snode))
958 93e4c50b Guido Trotter
959 a8083063 Iustin Pop
    feedback_fn("* Verifying orphan volumes")
960 a8083063 Iustin Pop
    result = self._VerifyOrphanVolumes(node_vol_should, node_volume,
961 a8083063 Iustin Pop
                                       feedback_fn)
962 a8083063 Iustin Pop
    bad = bad or result
963 a8083063 Iustin Pop
964 a8083063 Iustin Pop
    feedback_fn("* Verifying remaining instances")
965 a8083063 Iustin Pop
    result = self._VerifyOrphanInstances(instancelist, node_instance,
966 a8083063 Iustin Pop
                                         feedback_fn)
967 a8083063 Iustin Pop
    bad = bad or result
968 a8083063 Iustin Pop
969 e54c4c5e Guido Trotter
    if constants.VERIFY_NPLUSONE_MEM not in self.skip_set:
970 e54c4c5e Guido Trotter
      feedback_fn("* Verifying N+1 Memory redundancy")
971 e54c4c5e Guido Trotter
      result = self._VerifyNPlusOneMemory(node_info, instance_cfg, feedback_fn)
972 e54c4c5e Guido Trotter
      bad = bad or result
973 2b3b6ddd Guido Trotter
974 2b3b6ddd Guido Trotter
    feedback_fn("* Other Notes")
975 2b3b6ddd Guido Trotter
    if i_non_redundant:
976 2b3b6ddd Guido Trotter
      feedback_fn("  - NOTICE: %d non-redundant instance(s) found."
977 2b3b6ddd Guido Trotter
                  % len(i_non_redundant))
978 2b3b6ddd Guido Trotter
979 a8083063 Iustin Pop
    return int(bad)
980 a8083063 Iustin Pop
981 d8fff41c Guido Trotter
  def HooksCallBack(self, phase, hooks_results, feedback_fn, lu_result):
982 d8fff41c Guido Trotter
    """Analize the post-hooks' result, handle it, and send some
983 d8fff41c Guido Trotter
    nicely-formatted feedback back to the user.
984 d8fff41c Guido Trotter

985 d8fff41c Guido Trotter
    Args:
986 d8fff41c Guido Trotter
      phase: the hooks phase that has just been run
987 d8fff41c Guido Trotter
      hooks_results: the results of the multi-node hooks rpc call
988 d8fff41c Guido Trotter
      feedback_fn: function to send feedback back to the caller
989 d8fff41c Guido Trotter
      lu_result: previous Exec result
990 d8fff41c Guido Trotter

991 d8fff41c Guido Trotter
    """
992 d8fff41c Guido Trotter
    # We only really run POST phase hooks, and are only interested in
    # their results
993 d8fff41c Guido Trotter
    if phase == constants.HOOKS_PHASE_POST:
994 d8fff41c Guido Trotter
      # Used to change hooks' output to proper indentation
995 d8fff41c Guido Trotter
      indent_re = re.compile('^', re.M)
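      # e.g. "line1\nline2" becomes "      line1\n      line2" once the
      # failing script's output is re-indented below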
996 d8fff41c Guido Trotter
      feedback_fn("* Hooks Results")
997 d8fff41c Guido Trotter
      if not hooks_results:
998 d8fff41c Guido Trotter
        feedback_fn("  - ERROR: general communication failure")
999 d8fff41c Guido Trotter
        lu_result = 1
1000 d8fff41c Guido Trotter
      else:
1001 d8fff41c Guido Trotter
        for node_name in hooks_results:
1002 d8fff41c Guido Trotter
          show_node_header = True
1003 d8fff41c Guido Trotter
          res = hooks_results[node_name]
1004 d8fff41c Guido Trotter
          if res is False or not isinstance(res, list):
1005 d8fff41c Guido Trotter
            feedback_fn("    Communication failure")
1006 d8fff41c Guido Trotter
            lu_result = 1
1007 d8fff41c Guido Trotter
            continue
1008 d8fff41c Guido Trotter
          for script, hkr, output in res:
1009 d8fff41c Guido Trotter
            if hkr == constants.HKR_FAIL:
1010 d8fff41c Guido Trotter
              # The node header is only shown once, if there are
1011 d8fff41c Guido Trotter
              # failing hooks on that node
1012 d8fff41c Guido Trotter
              if show_node_header:
1013 d8fff41c Guido Trotter
                feedback_fn("  Node %s:" % node_name)
1014 d8fff41c Guido Trotter
                show_node_header = False
1015 d8fff41c Guido Trotter
              feedback_fn("    ERROR: Script %s failed, output:" % script)
1016 d8fff41c Guido Trotter
              output = indent_re.sub('      ', output)
1017 d8fff41c Guido Trotter
              feedback_fn("%s" % output)
1018 d8fff41c Guido Trotter
              lu_result = 1
1019 d8fff41c Guido Trotter
1020 d8fff41c Guido Trotter
      return lu_result
1021 d8fff41c Guido Trotter
1022 a8083063 Iustin Pop
1023 2c95a8d4 Iustin Pop
class LUVerifyDisks(NoHooksLU):
1024 2c95a8d4 Iustin Pop
  """Verifies the cluster disks status.
1025 2c95a8d4 Iustin Pop

1026 2c95a8d4 Iustin Pop
  """
1027 2c95a8d4 Iustin Pop
  _OP_REQP = []
1028 2c95a8d4 Iustin Pop
1029 2c95a8d4 Iustin Pop
  def CheckPrereq(self):
1030 2c95a8d4 Iustin Pop
    """Check prerequisites.
1031 2c95a8d4 Iustin Pop

1032 2c95a8d4 Iustin Pop
    This has no prerequisites.
1033 2c95a8d4 Iustin Pop

1034 2c95a8d4 Iustin Pop
    """
1035 2c95a8d4 Iustin Pop
    pass
1036 2c95a8d4 Iustin Pop
1037 2c95a8d4 Iustin Pop
  def Exec(self, feedback_fn):
1038 2c95a8d4 Iustin Pop
    """Verify integrity of cluster disks.
1039 2c95a8d4 Iustin Pop

1040 2c95a8d4 Iustin Pop
    """
1041 b63ed789 Iustin Pop
    result = res_nodes, res_nlvm, res_instances, res_missing = [], {}, [], {}
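    # res_nodes: nodes we could not contact, res_nlvm: per-node LVM
    # enumeration errors, res_instances: instances with offline LVs,
    # res_missing: instance name -> list of (node, LV) pairs not found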
1042 2c95a8d4 Iustin Pop
1043 2c95a8d4 Iustin Pop
    vg_name = self.cfg.GetVGName()
1044 2c95a8d4 Iustin Pop
    nodes = utils.NiceSort(self.cfg.GetNodeList())
1045 2c95a8d4 Iustin Pop
    instances = [self.cfg.GetInstanceInfo(name)
1046 2c95a8d4 Iustin Pop
                 for name in self.cfg.GetInstanceList()]
1047 2c95a8d4 Iustin Pop
1048 2c95a8d4 Iustin Pop
    nv_dict = {}
1049 2c95a8d4 Iustin Pop
    for inst in instances:
1050 2c95a8d4 Iustin Pop
      inst_lvs = {}
1051 2c95a8d4 Iustin Pop
      if (inst.status != "up" or
1052 2c95a8d4 Iustin Pop
          inst.disk_template not in constants.DTS_NET_MIRROR):
1053 2c95a8d4 Iustin Pop
        continue
1054 2c95a8d4 Iustin Pop
      inst.MapLVsByNode(inst_lvs)
1055 2c95a8d4 Iustin Pop
      # transform { iname: {node: [vol,],},} to {(node, vol): iname}
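      # e.g. (hypothetical names) {("node1", "lv_xyz"): <Instance inst1>}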
1056 2c95a8d4 Iustin Pop
      for node, vol_list in inst_lvs.iteritems():
1057 2c95a8d4 Iustin Pop
        for vol in vol_list:
1058 2c95a8d4 Iustin Pop
          nv_dict[(node, vol)] = inst
1059 2c95a8d4 Iustin Pop
1060 2c95a8d4 Iustin Pop
    if not nv_dict:
1061 2c95a8d4 Iustin Pop
      return result
1062 2c95a8d4 Iustin Pop
1063 2c95a8d4 Iustin Pop
    node_lvs = rpc.call_volume_list(nodes, vg_name)
1064 2c95a8d4 Iustin Pop
1065 2c95a8d4 Iustin Pop
    to_act = set()
1066 2c95a8d4 Iustin Pop
    for node in nodes:
1067 2c95a8d4 Iustin Pop
      # node_volume
1068 2c95a8d4 Iustin Pop
      lvs = node_lvs[node]
1069 2c95a8d4 Iustin Pop
1070 b63ed789 Iustin Pop
      if isinstance(lvs, basestring):
1071 b63ed789 Iustin Pop
        logger.Info("error enumerating LVs on node %s: %s" % (node, lvs))
1072 b63ed789 Iustin Pop
        res_nlvm[node] = lvs
1073 b63ed789 Iustin Pop
      elif not isinstance(lvs, dict):
1074 2c95a8d4 Iustin Pop
        logger.Info("connection to node %s failed or invalid data returned" %
1075 2c95a8d4 Iustin Pop
                    (node,))
1076 2c95a8d4 Iustin Pop
        res_nodes.append(node)
1077 2c95a8d4 Iustin Pop
        continue
1078 2c95a8d4 Iustin Pop
1079 2c95a8d4 Iustin Pop
      for lv_name, (_, lv_inactive, lv_online) in lvs.iteritems():
1080 b63ed789 Iustin Pop
        inst = nv_dict.pop((node, lv_name), None)
1081 b63ed789 Iustin Pop
        if (not lv_online and inst is not None
1082 b63ed789 Iustin Pop
            and inst.name not in res_instances):
1083 b08d5a87 Iustin Pop
          res_instances.append(inst.name)
1084 2c95a8d4 Iustin Pop
1085 b63ed789 Iustin Pop
    # any leftover items in nv_dict are missing LVs, let's arrange the
1086 b63ed789 Iustin Pop
    # data better
1087 b63ed789 Iustin Pop
    for key, inst in nv_dict.iteritems():
1088 b63ed789 Iustin Pop
      if inst.name not in res_missing:
1089 b63ed789 Iustin Pop
        res_missing[inst.name] = []
1090 b63ed789 Iustin Pop
      res_missing[inst.name].append(key)
1091 b63ed789 Iustin Pop
1092 2c95a8d4 Iustin Pop
    return result
1093 2c95a8d4 Iustin Pop
1094 2c95a8d4 Iustin Pop
1095 07bd8a51 Iustin Pop
class LURenameCluster(LogicalUnit):
1096 07bd8a51 Iustin Pop
  """Rename the cluster.
1097 07bd8a51 Iustin Pop

1098 07bd8a51 Iustin Pop
  """
1099 07bd8a51 Iustin Pop
  HPATH = "cluster-rename"
1100 07bd8a51 Iustin Pop
  HTYPE = constants.HTYPE_CLUSTER
1101 07bd8a51 Iustin Pop
  _OP_REQP = ["name"]
1102 07bd8a51 Iustin Pop
1103 07bd8a51 Iustin Pop
  def BuildHooksEnv(self):
1104 07bd8a51 Iustin Pop
    """Build hooks env.
1105 07bd8a51 Iustin Pop

1106 07bd8a51 Iustin Pop
    """
1107 07bd8a51 Iustin Pop
    env = {
1108 488b540d Iustin Pop
      "OP_TARGET": self.sstore.GetClusterName(),
1109 07bd8a51 Iustin Pop
      "NEW_NAME": self.op.name,
1110 07bd8a51 Iustin Pop
      }
1111 07bd8a51 Iustin Pop
    mn = self.sstore.GetMasterNode()
1112 07bd8a51 Iustin Pop
    return env, [mn], [mn]
1113 07bd8a51 Iustin Pop
1114 07bd8a51 Iustin Pop
  def CheckPrereq(self):
1115 07bd8a51 Iustin Pop
    """Verify that the passed name is a valid one.
1116 07bd8a51 Iustin Pop

1117 07bd8a51 Iustin Pop
    """
1118 89e1fc26 Iustin Pop
    hostname = utils.HostInfo(self.op.name)
1119 07bd8a51 Iustin Pop
1120 bcf043c9 Iustin Pop
    new_name = hostname.name
1121 bcf043c9 Iustin Pop
    self.ip = new_ip = hostname.ip
1122 07bd8a51 Iustin Pop
    old_name = self.sstore.GetClusterName()
1123 07bd8a51 Iustin Pop
    old_ip = self.sstore.GetMasterIP()
1124 07bd8a51 Iustin Pop
    if new_name == old_name and new_ip == old_ip:
1125 07bd8a51 Iustin Pop
      raise errors.OpPrereqError("Neither the name nor the IP address of the"
1126 07bd8a51 Iustin Pop
                                 " cluster has changed")
1127 07bd8a51 Iustin Pop
    if new_ip != old_ip:
1128 07bd8a51 Iustin Pop
      result = utils.RunCmd(["fping", "-q", new_ip])
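      # a successful fping means something already answers on the new IP,
      # so we refuse to rename the cluster onto an address in use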
1129 07bd8a51 Iustin Pop
      if not result.failed:
1130 07bd8a51 Iustin Pop
        raise errors.OpPrereqError("The given cluster IP address (%s) is"
1131 07bd8a51 Iustin Pop
                                   " reachable on the network. Aborting." %
1132 07bd8a51 Iustin Pop
                                   new_ip)
1133 07bd8a51 Iustin Pop
1134 07bd8a51 Iustin Pop
    self.op.name = new_name
1135 07bd8a51 Iustin Pop
1136 07bd8a51 Iustin Pop
  def Exec(self, feedback_fn):
1137 07bd8a51 Iustin Pop
    """Rename the cluster.
1138 07bd8a51 Iustin Pop

1139 07bd8a51 Iustin Pop
    """
1140 07bd8a51 Iustin Pop
    clustername = self.op.name
1141 07bd8a51 Iustin Pop
    ip = self.ip
1142 07bd8a51 Iustin Pop
    ss = self.sstore
1143 07bd8a51 Iustin Pop
1144 07bd8a51 Iustin Pop
    # shutdown the master IP
1145 07bd8a51 Iustin Pop
    master = ss.GetMasterNode()
1146 07bd8a51 Iustin Pop
    if not rpc.call_node_stop_master(master):
1147 07bd8a51 Iustin Pop
      raise errors.OpExecError("Could not disable the master role")
1148 07bd8a51 Iustin Pop
1149 07bd8a51 Iustin Pop
    try:
1150 07bd8a51 Iustin Pop
      # modify the sstore
1151 07bd8a51 Iustin Pop
      ss.SetKey(ss.SS_MASTER_IP, ip)
1152 07bd8a51 Iustin Pop
      ss.SetKey(ss.SS_CLUSTER_NAME, clustername)
1153 07bd8a51 Iustin Pop
1154 07bd8a51 Iustin Pop
      # Distribute updated ss config to all nodes
1155 07bd8a51 Iustin Pop
      myself = self.cfg.GetNodeInfo(master)
1156 07bd8a51 Iustin Pop
      dist_nodes = self.cfg.GetNodeList()
1157 07bd8a51 Iustin Pop
      if myself.name in dist_nodes:
1158 07bd8a51 Iustin Pop
        dist_nodes.remove(myself.name)
1159 07bd8a51 Iustin Pop
1160 07bd8a51 Iustin Pop
      logger.Debug("Copying updated ssconf data to all nodes")
1161 07bd8a51 Iustin Pop
      for keyname in [ss.SS_CLUSTER_NAME, ss.SS_MASTER_IP]:
1162 07bd8a51 Iustin Pop
        fname = ss.KeyToFilename(keyname)
1163 07bd8a51 Iustin Pop
        result = rpc.call_upload_file(dist_nodes, fname)
1164 07bd8a51 Iustin Pop
        for to_node in dist_nodes:
1165 07bd8a51 Iustin Pop
          if not result[to_node]:
1166 07bd8a51 Iustin Pop
            logger.Error("copy of file %s to node %s failed" %
1167 07bd8a51 Iustin Pop
                         (fname, to_node))
1168 07bd8a51 Iustin Pop
    finally:
1169 07bd8a51 Iustin Pop
      if not rpc.call_node_start_master(master):
1170 f4bc1f2c Michael Hanselmann
        logger.Error("Could not re-enable the master role on the master,"
1171 f4bc1f2c Michael Hanselmann
                     " please restart manually.")
1172 07bd8a51 Iustin Pop
1173 07bd8a51 Iustin Pop
1174 8084f9f6 Manuel Franceschini
def _RecursiveCheckIfLVMBased(disk):
1175 8084f9f6 Manuel Franceschini
  """Check if the given disk or its children are lvm-based.
1176 8084f9f6 Manuel Franceschini

1177 8084f9f6 Manuel Franceschini
  Args:
1178 8084f9f6 Manuel Franceschini
    disk: ganeti.objects.Disk object
1179 8084f9f6 Manuel Franceschini

1180 8084f9f6 Manuel Franceschini
  Returns:
1181 8084f9f6 Manuel Franceschini
    boolean indicating whether a LD_LV dev_type was found or not
1182 8084f9f6 Manuel Franceschini

1183 8084f9f6 Manuel Franceschini
  """
1184 8084f9f6 Manuel Franceschini
  if disk.children:
1185 8084f9f6 Manuel Franceschini
    for chdisk in disk.children:
1186 8084f9f6 Manuel Franceschini
      if _RecursiveCheckIfLVMBased(chdisk):
1187 8084f9f6 Manuel Franceschini
        return True
1188 8084f9f6 Manuel Franceschini
  return disk.dev_type == constants.LD_LV
1189 8084f9f6 Manuel Franceschini
1190 8084f9f6 Manuel Franceschini
1191 8084f9f6 Manuel Franceschini
class LUSetClusterParams(LogicalUnit):
1192 8084f9f6 Manuel Franceschini
  """Change the parameters of the cluster.
1193 8084f9f6 Manuel Franceschini

1194 8084f9f6 Manuel Franceschini
  """
1195 8084f9f6 Manuel Franceschini
  HPATH = "cluster-modify"
1196 8084f9f6 Manuel Franceschini
  HTYPE = constants.HTYPE_CLUSTER
1197 8084f9f6 Manuel Franceschini
  _OP_REQP = []
1198 8084f9f6 Manuel Franceschini
1199 8084f9f6 Manuel Franceschini
  def BuildHooksEnv(self):
1200 8084f9f6 Manuel Franceschini
    """Build hooks env.
1201 8084f9f6 Manuel Franceschini

1202 8084f9f6 Manuel Franceschini
    """
1203 8084f9f6 Manuel Franceschini
    env = {
1204 8084f9f6 Manuel Franceschini
      "OP_TARGET": self.sstore.GetClusterName(),
1205 8084f9f6 Manuel Franceschini
      "NEW_VG_NAME": self.op.vg_name,
1206 8084f9f6 Manuel Franceschini
      }
1207 8084f9f6 Manuel Franceschini
    mn = self.sstore.GetMasterNode()
1208 8084f9f6 Manuel Franceschini
    return env, [mn], [mn]
1209 8084f9f6 Manuel Franceschini
1210 8084f9f6 Manuel Franceschini
  def CheckPrereq(self):
1211 8084f9f6 Manuel Franceschini
    """Check prerequisites.
1212 8084f9f6 Manuel Franceschini

1213 8084f9f6 Manuel Franceschini
    This checks whether the given params don't conflict and
1214 5f83e263 Iustin Pop
    if the given volume group is valid.
1215 8084f9f6 Manuel Franceschini

1216 8084f9f6 Manuel Franceschini
    """
1217 8084f9f6 Manuel Franceschini
    if not self.op.vg_name:
1218 8084f9f6 Manuel Franceschini
      instances = [self.cfg.GetInstanceInfo(name)
1219 8084f9f6 Manuel Franceschini
                   for name in self.cfg.GetInstanceList()]
1220 8084f9f6 Manuel Franceschini
      for inst in instances:
1221 8084f9f6 Manuel Franceschini
        for disk in inst.disks:
1222 8084f9f6 Manuel Franceschini
          if _RecursiveCheckIfLVMBased(disk):
1223 8084f9f6 Manuel Franceschini
            raise errors.OpPrereqError("Cannot disable lvm storage while"
1224 8084f9f6 Manuel Franceschini
                                       " lvm-based instances exist")
1225 8084f9f6 Manuel Franceschini
1226 8084f9f6 Manuel Franceschini
    # if vg_name is not None, check the given volume group on all nodes
1227 8084f9f6 Manuel Franceschini
    if self.op.vg_name:
1228 8084f9f6 Manuel Franceschini
      node_list = self.cfg.GetNodeList()
1229 8084f9f6 Manuel Franceschini
      vglist = rpc.call_vg_list(node_list)
1230 8084f9f6 Manuel Franceschini
      for node in node_list:
1231 8084f9f6 Manuel Franceschini
        vgstatus = _HasValidVG(vglist[node], self.op.vg_name)
1232 8084f9f6 Manuel Franceschini
        if vgstatus:
1233 8084f9f6 Manuel Franceschini
          raise errors.OpPrereqError("Error on node '%s': %s" %
1234 8084f9f6 Manuel Franceschini
                                     (node, vgstatus))
1235 8084f9f6 Manuel Franceschini
1236 8084f9f6 Manuel Franceschini
  def Exec(self, feedback_fn):
1237 8084f9f6 Manuel Franceschini
    """Change the parameters of the cluster.
1238 8084f9f6 Manuel Franceschini

1239 8084f9f6 Manuel Franceschini
    """
1240 8084f9f6 Manuel Franceschini
    if self.op.vg_name != self.cfg.GetVGName():
1241 8084f9f6 Manuel Franceschini
      self.cfg.SetVGName(self.op.vg_name)
1242 8084f9f6 Manuel Franceschini
    else:
1243 8084f9f6 Manuel Franceschini
      feedback_fn("Cluster LVM configuration already in desired"
1244 8084f9f6 Manuel Franceschini
                  " state, not changing")
1245 8084f9f6 Manuel Franceschini
1246 8084f9f6 Manuel Franceschini
1247 5bfac263 Iustin Pop
def _WaitForSync(cfgw, instance, proc, oneshot=False, unlock=False):
1248 a8083063 Iustin Pop
  """Sleep and poll for an instance's disk to sync.
1249 a8083063 Iustin Pop

1250 a8083063 Iustin Pop
  """
1251 a8083063 Iustin Pop
  if not instance.disks:
1252 a8083063 Iustin Pop
    return True
1253 a8083063 Iustin Pop
1254 a8083063 Iustin Pop
  if not oneshot:
1255 5bfac263 Iustin Pop
    proc.LogInfo("Waiting for instance %s to sync disks." % instance.name)
1256 a8083063 Iustin Pop
1257 a8083063 Iustin Pop
  node = instance.primary_node
1258 a8083063 Iustin Pop
1259 a8083063 Iustin Pop
  for dev in instance.disks:
1260 a8083063 Iustin Pop
    cfgw.SetDiskID(dev, node)
1261 a8083063 Iustin Pop
1262 a8083063 Iustin Pop
  retries = 0
1263 a8083063 Iustin Pop
  while True:
1264 a8083063 Iustin Pop
    max_time = 0
1265 a8083063 Iustin Pop
    done = True
1266 a8083063 Iustin Pop
    cumul_degraded = False
1267 a8083063 Iustin Pop
    rstats = rpc.call_blockdev_getmirrorstatus(node, instance.disks)
1268 a8083063 Iustin Pop
    if not rstats:
1269 5bfac263 Iustin Pop
      proc.LogWarning("Can't get any data from node %s" % node)
1270 a8083063 Iustin Pop
      retries += 1
1271 a8083063 Iustin Pop
      if retries >= 10:
1272 3ecf6786 Iustin Pop
        raise errors.RemoteError("Can't contact node %s for mirror data,"
1273 3ecf6786 Iustin Pop
                                 " aborting." % node)
1274 a8083063 Iustin Pop
      time.sleep(6)
1275 a8083063 Iustin Pop
      continue
1276 a8083063 Iustin Pop
    retries = 0
1277 a8083063 Iustin Pop
    for i in range(len(rstats)):
1278 a8083063 Iustin Pop
      mstat = rstats[i]
1279 a8083063 Iustin Pop
      if mstat is None:
1280 5bfac263 Iustin Pop
        proc.LogWarning("Can't compute data for node %s/%s" %
1281 a8083063 Iustin Pop
                        (node, instance.disks[i].iv_name))
1282 a8083063 Iustin Pop
        continue
1283 0834c866 Iustin Pop
      # we ignore the ldisk parameter
1284 0834c866 Iustin Pop
      perc_done, est_time, is_degraded, _ = mstat
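      # perc_done is None once a device reports no resync in progress; a
      # device that is degraded while not resyncing is counted as a real
      # problem via cumul_degraded below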
1285 a8083063 Iustin Pop
      cumul_degraded = cumul_degraded or (is_degraded and perc_done is None)
1286 a8083063 Iustin Pop
      if perc_done is not None:
1287 a8083063 Iustin Pop
        done = False
1288 a8083063 Iustin Pop
        if est_time is not None:
1289 a8083063 Iustin Pop
          rem_time = "%d estimated seconds remaining" % est_time
1290 a8083063 Iustin Pop
          max_time = est_time
1291 a8083063 Iustin Pop
        else:
1292 a8083063 Iustin Pop
          rem_time = "no time estimate"
1293 5bfac263 Iustin Pop
        proc.LogInfo("- device %s: %5.2f%% done, %s" %
1294 5bfac263 Iustin Pop
                     (instance.disks[i].iv_name, perc_done, rem_time))
1295 a8083063 Iustin Pop
    if done or oneshot:
1296 a8083063 Iustin Pop
      break
1297 a8083063 Iustin Pop
1298 a8083063 Iustin Pop
    if unlock:
1299 685ee993 Iustin Pop
      #utils.Unlock('cmd')
1300 685ee993 Iustin Pop
      pass
1301 a8083063 Iustin Pop
    try:
1302 a8083063 Iustin Pop
      time.sleep(min(60, max_time))
1303 a8083063 Iustin Pop
    finally:
1304 a8083063 Iustin Pop
      if unlock:
1305 685ee993 Iustin Pop
        #utils.Lock('cmd')
1306 685ee993 Iustin Pop
        pass
1307 a8083063 Iustin Pop
1308 a8083063 Iustin Pop
  if done:
1309 5bfac263 Iustin Pop
    proc.LogInfo("Instance %s's disks are in sync." % instance.name)
1310 a8083063 Iustin Pop
  return not cumul_degraded
1311 a8083063 Iustin Pop
1312 a8083063 Iustin Pop
1313 0834c866 Iustin Pop
def _CheckDiskConsistency(cfgw, dev, node, on_primary, ldisk=False):
1314 a8083063 Iustin Pop
  """Check that mirrors are not degraded.
1315 a8083063 Iustin Pop

1316 0834c866 Iustin Pop
  The ldisk parameter, if True, will change the test from the
1317 0834c866 Iustin Pop
  is_degraded attribute (which represents overall non-ok status for
1318 0834c866 Iustin Pop
  the device(s)) to the ldisk (representing the local storage status).
1319 0834c866 Iustin Pop

1320 a8083063 Iustin Pop
  """
1321 a8083063 Iustin Pop
  cfgw.SetDiskID(dev, node)
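  # rpc.call_blockdev_find returns a status tuple; as used here, index 5
  # holds the overall is_degraded flag and index 6 the ldisk status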
1322 0834c866 Iustin Pop
  if ldisk:
1323 0834c866 Iustin Pop
    idx = 6
1324 0834c866 Iustin Pop
  else:
1325 0834c866 Iustin Pop
    idx = 5
1326 a8083063 Iustin Pop
1327 a8083063 Iustin Pop
  result = True
1328 a8083063 Iustin Pop
  if on_primary or dev.AssembleOnSecondary():
1329 a8083063 Iustin Pop
    rstats = rpc.call_blockdev_find(node, dev)
1330 a8083063 Iustin Pop
    if not rstats:
1331 aa9d0c32 Guido Trotter
      logger.ToStderr("Node %s: Disk degraded, not found or node down" % node)
1332 a8083063 Iustin Pop
      result = False
1333 a8083063 Iustin Pop
    else:
1334 0834c866 Iustin Pop
      result = result and (not rstats[idx])
1335 a8083063 Iustin Pop
  if dev.children:
1336 a8083063 Iustin Pop
    for child in dev.children:
1337 a8083063 Iustin Pop
      result = result and _CheckDiskConsistency(cfgw, child, node, on_primary)
1338 a8083063 Iustin Pop
1339 a8083063 Iustin Pop
  return result
1340 a8083063 Iustin Pop
1341 a8083063 Iustin Pop
1342 a8083063 Iustin Pop
class LUDiagnoseOS(NoHooksLU):
1343 a8083063 Iustin Pop
  """Logical unit for OS diagnose/query.
1344 a8083063 Iustin Pop

1345 a8083063 Iustin Pop
  """
1346 1f9430d6 Iustin Pop
  _OP_REQP = ["output_fields", "names"]
1347 a8083063 Iustin Pop
1348 a8083063 Iustin Pop
  def CheckPrereq(self):
1349 a8083063 Iustin Pop
    """Check prerequisites.
1350 a8083063 Iustin Pop

1351 a8083063 Iustin Pop
    This always succeeds, since this is a pure query LU.
1352 a8083063 Iustin Pop

1353 a8083063 Iustin Pop
    """
1354 1f9430d6 Iustin Pop
    if self.op.names:
1355 1f9430d6 Iustin Pop
      raise errors.OpPrereqError("Selective OS query not supported")
1356 1f9430d6 Iustin Pop
1357 1f9430d6 Iustin Pop
    self.dynamic_fields = frozenset(["name", "valid", "node_status"])
1358 1f9430d6 Iustin Pop
    _CheckOutputFields(static=[],
1359 1f9430d6 Iustin Pop
                       dynamic=self.dynamic_fields,
1360 1f9430d6 Iustin Pop
                       selected=self.op.output_fields)
1361 1f9430d6 Iustin Pop
1362 1f9430d6 Iustin Pop
  @staticmethod
1363 1f9430d6 Iustin Pop
  def _DiagnoseByOS(node_list, rlist):
1364 1f9430d6 Iustin Pop
    """Remaps a per-node return list into an a per-os per-node dictionary
1365 1f9430d6 Iustin Pop

1366 1f9430d6 Iustin Pop
      Args:
1367 1f9430d6 Iustin Pop
        node_list: a list with the names of all nodes
1368 1f9430d6 Iustin Pop
        rlist: a map with node names as keys and OS objects as values
1369 1f9430d6 Iustin Pop

1370 1f9430d6 Iustin Pop
      Returns:
1371 1f9430d6 Iustin Pop
        map: a map with osnames as keys and as value another map, with
1372 1f9430d6 Iustin Pop
             nodes as
1373 1f9430d6 Iustin Pop
             keys and list of OS objects as values
1374 1f9430d6 Iustin Pop
             e.g. {"debian-etch": {"node1": [<object>,...],
1375 1f9430d6 Iustin Pop
                                   "node2": [<object>,]}
1376 1f9430d6 Iustin Pop
                  }
1377 1f9430d6 Iustin Pop

1378 1f9430d6 Iustin Pop
    """
1379 1f9430d6 Iustin Pop
    all_os = {}
1380 1f9430d6 Iustin Pop
    for node_name, nr in rlist.iteritems():
1381 1f9430d6 Iustin Pop
      if not nr:
1382 1f9430d6 Iustin Pop
        continue
1383 b4de68a9 Iustin Pop
      for os_obj in nr:
1384 b4de68a9 Iustin Pop
        if os_obj.name not in all_os:
1385 1f9430d6 Iustin Pop
          # build a list of nodes for this os containing empty lists
1386 1f9430d6 Iustin Pop
          # for each node in node_list
1387 b4de68a9 Iustin Pop
          all_os[os_obj.name] = {}
1388 1f9430d6 Iustin Pop
          for nname in node_list:
1389 b4de68a9 Iustin Pop
            all_os[os_obj.name][nname] = []
1390 b4de68a9 Iustin Pop
        all_os[os_obj.name][node_name].append(os_obj)
1391 1f9430d6 Iustin Pop
    return all_os
1392 a8083063 Iustin Pop
1393 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
1394 a8083063 Iustin Pop
    """Compute the list of OSes.
1395 a8083063 Iustin Pop

1396 a8083063 Iustin Pop
    """
1397 a8083063 Iustin Pop
    node_list = self.cfg.GetNodeList()
1398 a8083063 Iustin Pop
    node_data = rpc.call_os_diagnose(node_list)
1399 a8083063 Iustin Pop
    if node_data == False:
1400 3ecf6786 Iustin Pop
      raise errors.OpExecError("Can't gather the list of OSes")
1401 1f9430d6 Iustin Pop
    pol = self._DiagnoseByOS(node_list, node_data)
1402 1f9430d6 Iustin Pop
    output = []
1403 1f9430d6 Iustin Pop
    for os_name, os_data in pol.iteritems():
1404 1f9430d6 Iustin Pop
      row = []
1405 1f9430d6 Iustin Pop
      for field in self.op.output_fields:
1406 1f9430d6 Iustin Pop
        if field == "name":
1407 1f9430d6 Iustin Pop
          val = os_name
1408 1f9430d6 Iustin Pop
        elif field == "valid":
1409 1f9430d6 Iustin Pop
          val = utils.all([osl and osl[0] for osl in os_data.values()])
1410 1f9430d6 Iustin Pop
        elif field == "node_status":
1411 1f9430d6 Iustin Pop
          val = {}
1412 1f9430d6 Iustin Pop
          for node_name, nos_list in os_data.iteritems():
1413 1f9430d6 Iustin Pop
            val[node_name] = [(v.status, v.path) for v in nos_list]
1414 1f9430d6 Iustin Pop
        else:
1415 1f9430d6 Iustin Pop
          raise errors.ParameterError(field)
1416 1f9430d6 Iustin Pop
        row.append(val)
1417 1f9430d6 Iustin Pop
      output.append(row)
1418 1f9430d6 Iustin Pop
1419 1f9430d6 Iustin Pop
    return output
1420 a8083063 Iustin Pop
1421 a8083063 Iustin Pop
1422 a8083063 Iustin Pop
class LURemoveNode(LogicalUnit):
1423 a8083063 Iustin Pop
  """Logical unit for removing a node.
1424 a8083063 Iustin Pop

1425 a8083063 Iustin Pop
  """
1426 a8083063 Iustin Pop
  HPATH = "node-remove"
1427 a8083063 Iustin Pop
  HTYPE = constants.HTYPE_NODE
1428 a8083063 Iustin Pop
  _OP_REQP = ["node_name"]
1429 a8083063 Iustin Pop
1430 a8083063 Iustin Pop
  def BuildHooksEnv(self):
1431 a8083063 Iustin Pop
    """Build hooks env.
1432 a8083063 Iustin Pop

1433 a8083063 Iustin Pop
    This doesn't run on the target node in the pre phase as a failed
1434 a8083063 Iustin Pop
    node would not allows itself to run.
1435 a8083063 Iustin Pop

1436 a8083063 Iustin Pop
    """
1437 396e1b78 Michael Hanselmann
    env = {
1438 0e137c28 Iustin Pop
      "OP_TARGET": self.op.node_name,
1439 396e1b78 Michael Hanselmann
      "NODE_NAME": self.op.node_name,
1440 396e1b78 Michael Hanselmann
      }
1441 a8083063 Iustin Pop
    all_nodes = self.cfg.GetNodeList()
1442 a8083063 Iustin Pop
    all_nodes.remove(self.op.node_name)
1443 396e1b78 Michael Hanselmann
    return env, all_nodes, all_nodes
1444 a8083063 Iustin Pop
1445 a8083063 Iustin Pop
  def CheckPrereq(self):
1446 a8083063 Iustin Pop
    """Check prerequisites.
1447 a8083063 Iustin Pop

1448 a8083063 Iustin Pop
    This checks:
1449 a8083063 Iustin Pop
     - the node exists in the configuration
1450 a8083063 Iustin Pop
     - it does not have primary or secondary instances
1451 a8083063 Iustin Pop
     - it's not the master
1452 a8083063 Iustin Pop

1453 a8083063 Iustin Pop
    Any errors are signalled by raising errors.OpPrereqError.
1454 a8083063 Iustin Pop

1455 a8083063 Iustin Pop
    """
1456 a8083063 Iustin Pop
    node = self.cfg.GetNodeInfo(self.cfg.ExpandNodeName(self.op.node_name))
1457 a8083063 Iustin Pop
    if node is None:
1458 a02bc76e Iustin Pop
      raise errors.OpPrereqError("Node '%s' is unknown." % self.op.node_name)
1459 a8083063 Iustin Pop
1460 a8083063 Iustin Pop
    instance_list = self.cfg.GetInstanceList()
1461 a8083063 Iustin Pop
1462 880478f8 Iustin Pop
    masternode = self.sstore.GetMasterNode()
1463 a8083063 Iustin Pop
    if node.name == masternode:
1464 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("Node is the master node,"
1465 3ecf6786 Iustin Pop
                                 " you need to failover first.")
1466 a8083063 Iustin Pop
1467 a8083063 Iustin Pop
    for instance_name in instance_list:
1468 a8083063 Iustin Pop
      instance = self.cfg.GetInstanceInfo(instance_name)
1469 a8083063 Iustin Pop
      if node.name == instance.primary_node:
1470 3ecf6786 Iustin Pop
        raise errors.OpPrereqError("Instance %s still running on the node,"
1471 3ecf6786 Iustin Pop
                                   " please remove first." % instance_name)
1472 a8083063 Iustin Pop
      if node.name in instance.secondary_nodes:
1473 3ecf6786 Iustin Pop
        raise errors.OpPrereqError("Instance %s has node as a secondary,"
1474 3ecf6786 Iustin Pop
                                   " please remove first." % instance_name)
1475 a8083063 Iustin Pop
    self.op.node_name = node.name
1476 a8083063 Iustin Pop
    self.node = node
1477 a8083063 Iustin Pop
1478 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
1479 a8083063 Iustin Pop
    """Removes the node from the cluster.
1480 a8083063 Iustin Pop

1481 a8083063 Iustin Pop
    """
1482 a8083063 Iustin Pop
    node = self.node
1483 a8083063 Iustin Pop
    logger.Info("stopping the node daemon and removing configs from node %s" %
1484 a8083063 Iustin Pop
                node.name)
1485 a8083063 Iustin Pop
1486 a8083063 Iustin Pop
    rpc.call_node_leave_cluster(node.name)
1487 a8083063 Iustin Pop
1488 c92b310a Michael Hanselmann
    self.ssh.Run(node.name, 'root', "%s stop" % constants.NODE_INITD_SCRIPT)
1489 a8083063 Iustin Pop
1490 a8083063 Iustin Pop
    logger.Info("Removing node %s from config" % node.name)
1491 a8083063 Iustin Pop
1492 a8083063 Iustin Pop
    self.cfg.RemoveNode(node.name)
1493 a8083063 Iustin Pop
1494 c8a0948f Michael Hanselmann
    _RemoveHostFromEtcHosts(node.name)
1495 c8a0948f Michael Hanselmann
1496 a8083063 Iustin Pop
1497 a8083063 Iustin Pop
class LUQueryNodes(NoHooksLU):
1498 a8083063 Iustin Pop
  """Logical unit for querying nodes.
1499 a8083063 Iustin Pop

1500 a8083063 Iustin Pop
  """
1501 246e180a Iustin Pop
  _OP_REQP = ["output_fields", "names"]
1502 a8083063 Iustin Pop
1503 a8083063 Iustin Pop
  def CheckPrereq(self):
1504 a8083063 Iustin Pop
    """Check prerequisites.
1505 a8083063 Iustin Pop

1506 a8083063 Iustin Pop
    This checks that the fields required are valid output fields.
1507 a8083063 Iustin Pop

1508 a8083063 Iustin Pop
    """
1509 e8a4c138 Iustin Pop
    self.dynamic_fields = frozenset([
1510 e8a4c138 Iustin Pop
      "dtotal", "dfree",
1511 e8a4c138 Iustin Pop
      "mtotal", "mnode", "mfree",
1512 e8a4c138 Iustin Pop
      "bootid",
1513 e8a4c138 Iustin Pop
      "ctotal",
1514 e8a4c138 Iustin Pop
      ])
1515 a8083063 Iustin Pop
1516 ec223efb Iustin Pop
    _CheckOutputFields(static=["name", "pinst_cnt", "sinst_cnt",
1517 ec223efb Iustin Pop
                               "pinst_list", "sinst_list",
1518 ec223efb Iustin Pop
                               "pip", "sip"],
1519 dcb93971 Michael Hanselmann
                       dynamic=self.dynamic_fields,
1520 dcb93971 Michael Hanselmann
                       selected=self.op.output_fields)
1521 a8083063 Iustin Pop
1522 246e180a Iustin Pop
    self.wanted = _GetWantedNodes(self, self.op.names)
1523 a8083063 Iustin Pop
1524 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
1525 a8083063 Iustin Pop
    """Computes the list of nodes and their attributes.
1526 a8083063 Iustin Pop

1527 a8083063 Iustin Pop
    """
1528 246e180a Iustin Pop
    nodenames = self.wanted
1529 a8083063 Iustin Pop
    nodelist = [self.cfg.GetNodeInfo(name) for name in nodenames]
1530 a8083063 Iustin Pop
1531 a8083063 Iustin Pop
    # begin data gathering
1532 a8083063 Iustin Pop
1533 a8083063 Iustin Pop
    if self.dynamic_fields.intersection(self.op.output_fields):
1534 a8083063 Iustin Pop
      live_data = {}
1535 a8083063 Iustin Pop
      node_data = rpc.call_node_info(nodenames, self.cfg.GetVGName())
1536 a8083063 Iustin Pop
      for name in nodenames:
1537 a8083063 Iustin Pop
        nodeinfo = node_data.get(name, None)
1538 a8083063 Iustin Pop
        if nodeinfo:
1539 a8083063 Iustin Pop
          live_data[name] = {
1540 a8083063 Iustin Pop
            "mtotal": utils.TryConvert(int, nodeinfo['memory_total']),
1541 a8083063 Iustin Pop
            "mnode": utils.TryConvert(int, nodeinfo['memory_dom0']),
1542 a8083063 Iustin Pop
            "mfree": utils.TryConvert(int, nodeinfo['memory_free']),
1543 a8083063 Iustin Pop
            "dtotal": utils.TryConvert(int, nodeinfo['vg_size']),
1544 a8083063 Iustin Pop
            "dfree": utils.TryConvert(int, nodeinfo['vg_free']),
1545 e8a4c138 Iustin Pop
            "ctotal": utils.TryConvert(int, nodeinfo['cpu_total']),
1546 3ef10550 Michael Hanselmann
            "bootid": nodeinfo['bootid'],
1547 a8083063 Iustin Pop
            }
1548 a8083063 Iustin Pop
        else:
1549 a8083063 Iustin Pop
          live_data[name] = {}
1550 a8083063 Iustin Pop
    else:
1551 a8083063 Iustin Pop
      live_data = dict.fromkeys(nodenames, {})
1552 a8083063 Iustin Pop
1553 ec223efb Iustin Pop
    node_to_primary = dict([(name, set()) for name in nodenames])
1554 ec223efb Iustin Pop
    node_to_secondary = dict([(name, set()) for name in nodenames])
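    # e.g. (hypothetical names) node_to_primary["node1"] becomes the set
    # of instance names whose primary node is node1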
1555 a8083063 Iustin Pop
1556 ec223efb Iustin Pop
    inst_fields = frozenset(("pinst_cnt", "pinst_list",
1557 ec223efb Iustin Pop
                             "sinst_cnt", "sinst_list"))
1558 ec223efb Iustin Pop
    if inst_fields & frozenset(self.op.output_fields):
1559 a8083063 Iustin Pop
      instancelist = self.cfg.GetInstanceList()
1560 a8083063 Iustin Pop
1561 ec223efb Iustin Pop
      for instance_name in instancelist:
1562 ec223efb Iustin Pop
        inst = self.cfg.GetInstanceInfo(instance_name)
1563 ec223efb Iustin Pop
        if inst.primary_node in node_to_primary:
1564 ec223efb Iustin Pop
          node_to_primary[inst.primary_node].add(inst.name)
1565 ec223efb Iustin Pop
        for secnode in inst.secondary_nodes:
1566 ec223efb Iustin Pop
          if secnode in node_to_secondary:
1567 ec223efb Iustin Pop
            node_to_secondary[secnode].add(inst.name)
1568 a8083063 Iustin Pop
1569 a8083063 Iustin Pop
    # end data gathering
1570 a8083063 Iustin Pop
1571 a8083063 Iustin Pop
    output = []
1572 a8083063 Iustin Pop
    for node in nodelist:
1573 a8083063 Iustin Pop
      node_output = []
1574 a8083063 Iustin Pop
      for field in self.op.output_fields:
1575 a8083063 Iustin Pop
        if field == "name":
1576 a8083063 Iustin Pop
          val = node.name
1577 ec223efb Iustin Pop
        elif field == "pinst_list":
1578 ec223efb Iustin Pop
          val = list(node_to_primary[node.name])
1579 ec223efb Iustin Pop
        elif field == "sinst_list":
1580 ec223efb Iustin Pop
          val = list(node_to_secondary[node.name])
1581 ec223efb Iustin Pop
        elif field == "pinst_cnt":
1582 ec223efb Iustin Pop
          val = len(node_to_primary[node.name])
1583 ec223efb Iustin Pop
        elif field == "sinst_cnt":
1584 ec223efb Iustin Pop
          val = len(node_to_secondary[node.name])
1585 a8083063 Iustin Pop
        elif field == "pip":
1586 a8083063 Iustin Pop
          val = node.primary_ip
1587 a8083063 Iustin Pop
        elif field == "sip":
1588 a8083063 Iustin Pop
          val = node.secondary_ip
1589 a8083063 Iustin Pop
        elif field in self.dynamic_fields:
1590 ec223efb Iustin Pop
          val = live_data[node.name].get(field, None)
1591 a8083063 Iustin Pop
        else:
1592 3ecf6786 Iustin Pop
          raise errors.ParameterError(field)
1593 a8083063 Iustin Pop
        node_output.append(val)
1594 a8083063 Iustin Pop
      output.append(node_output)
1595 a8083063 Iustin Pop
1596 a8083063 Iustin Pop
    return output
1597 a8083063 Iustin Pop
1598 a8083063 Iustin Pop
1599 dcb93971 Michael Hanselmann
class LUQueryNodeVolumes(NoHooksLU):
1600 dcb93971 Michael Hanselmann
  """Logical unit for getting volumes on node(s).
1601 dcb93971 Michael Hanselmann

1602 dcb93971 Michael Hanselmann
  """
1603 dcb93971 Michael Hanselmann
  _OP_REQP = ["nodes", "output_fields"]
1604 dcb93971 Michael Hanselmann
1605 dcb93971 Michael Hanselmann
  def CheckPrereq(self):
1606 dcb93971 Michael Hanselmann
    """Check prerequisites.
1607 dcb93971 Michael Hanselmann

1608 dcb93971 Michael Hanselmann
    This checks that the fields required are valid output fields.
1609 dcb93971 Michael Hanselmann

1610 dcb93971 Michael Hanselmann
    """
1611 dcb93971 Michael Hanselmann
    self.nodes = _GetWantedNodes(self, self.op.nodes)
1612 dcb93971 Michael Hanselmann
1613 dcb93971 Michael Hanselmann
    _CheckOutputFields(static=["node"],
1614 dcb93971 Michael Hanselmann
                       dynamic=["phys", "vg", "name", "size", "instance"],
1615 dcb93971 Michael Hanselmann
                       selected=self.op.output_fields)
1616 dcb93971 Michael Hanselmann
1617 dcb93971 Michael Hanselmann
1618 dcb93971 Michael Hanselmann
  def Exec(self, feedback_fn):
1619 dcb93971 Michael Hanselmann
    """Computes the list of nodes and their attributes.
1620 dcb93971 Michael Hanselmann

1621 dcb93971 Michael Hanselmann
    """
1622 a7ba5e53 Iustin Pop
    nodenames = self.nodes
1623 dcb93971 Michael Hanselmann
    volumes = rpc.call_node_volumes(nodenames)
1624 dcb93971 Michael Hanselmann
1625 dcb93971 Michael Hanselmann
    ilist = [self.cfg.GetInstanceInfo(iname) for iname
1626 dcb93971 Michael Hanselmann
             in self.cfg.GetInstanceList()]
1627 dcb93971 Michael Hanselmann
1628 dcb93971 Michael Hanselmann
    lv_by_node = dict([(inst, inst.MapLVsByNode()) for inst in ilist])
1629 dcb93971 Michael Hanselmann
1630 dcb93971 Michael Hanselmann
    output = []
1631 dcb93971 Michael Hanselmann
    for node in nodenames:
1632 37d19eb2 Michael Hanselmann
      if node not in volumes or not volumes[node]:
1633 37d19eb2 Michael Hanselmann
        continue
1634 37d19eb2 Michael Hanselmann
1635 dcb93971 Michael Hanselmann
      node_vols = volumes[node][:]
1636 dcb93971 Michael Hanselmann
      node_vols.sort(key=lambda vol: vol['dev'])
1637 dcb93971 Michael Hanselmann
1638 dcb93971 Michael Hanselmann
      for vol in node_vols:
1639 dcb93971 Michael Hanselmann
        node_output = []
1640 dcb93971 Michael Hanselmann
        for field in self.op.output_fields:
1641 dcb93971 Michael Hanselmann
          if field == "node":
1642 dcb93971 Michael Hanselmann
            val = node
1643 dcb93971 Michael Hanselmann
          elif field == "phys":
1644 dcb93971 Michael Hanselmann
            val = vol['dev']
1645 dcb93971 Michael Hanselmann
          elif field == "vg":
1646 dcb93971 Michael Hanselmann
            val = vol['vg']
1647 dcb93971 Michael Hanselmann
          elif field == "name":
1648 dcb93971 Michael Hanselmann
            val = vol['name']
1649 dcb93971 Michael Hanselmann
          elif field == "size":
1650 dcb93971 Michael Hanselmann
            val = int(float(vol['size']))
1651 dcb93971 Michael Hanselmann
          elif field == "instance":
1652 dcb93971 Michael Hanselmann
            for inst in ilist:
1653 dcb93971 Michael Hanselmann
              if node not in lv_by_node[inst]:
1654 dcb93971 Michael Hanselmann
                continue
1655 dcb93971 Michael Hanselmann
              if vol['name'] in lv_by_node[inst][node]:
1656 dcb93971 Michael Hanselmann
                val = inst.name
1657 dcb93971 Michael Hanselmann
                break
1658 dcb93971 Michael Hanselmann
            else:
1659 dcb93971 Michael Hanselmann
              val = '-'
1660 dcb93971 Michael Hanselmann
          else:
1661 3ecf6786 Iustin Pop
            raise errors.ParameterError(field)
1662 dcb93971 Michael Hanselmann
          node_output.append(str(val))
1663 dcb93971 Michael Hanselmann
1664 dcb93971 Michael Hanselmann
        output.append(node_output)
1665 dcb93971 Michael Hanselmann
1666 dcb93971 Michael Hanselmann
    return output
1667 dcb93971 Michael Hanselmann
1668 dcb93971 Michael Hanselmann
1669 a8083063 Iustin Pop
class LUAddNode(LogicalUnit):
1670 a8083063 Iustin Pop
  """Logical unit for adding node to the cluster.
1671 a8083063 Iustin Pop

1672 a8083063 Iustin Pop
  """
1673 a8083063 Iustin Pop
  HPATH = "node-add"
1674 a8083063 Iustin Pop
  HTYPE = constants.HTYPE_NODE
1675 a8083063 Iustin Pop
  _OP_REQP = ["node_name"]
1676 a8083063 Iustin Pop
1677 a8083063 Iustin Pop
  def BuildHooksEnv(self):
1678 a8083063 Iustin Pop
    """Build hooks env.
1679 a8083063 Iustin Pop

1680 a8083063 Iustin Pop
    This will run on all nodes before, and on all nodes + the new node after.
1681 a8083063 Iustin Pop

1682 a8083063 Iustin Pop
    """
1683 a8083063 Iustin Pop
    env = {
1684 0e137c28 Iustin Pop
      "OP_TARGET": self.op.node_name,
1685 a8083063 Iustin Pop
      "NODE_NAME": self.op.node_name,
1686 a8083063 Iustin Pop
      "NODE_PIP": self.op.primary_ip,
1687 a8083063 Iustin Pop
      "NODE_SIP": self.op.secondary_ip,
1688 a8083063 Iustin Pop
      }
1689 a8083063 Iustin Pop
    nodes_0 = self.cfg.GetNodeList()
1690 a8083063 Iustin Pop
    nodes_1 = nodes_0 + [self.op.node_name, ]
1691 a8083063 Iustin Pop
    return env, nodes_0, nodes_1
1692 a8083063 Iustin Pop
1693 a8083063 Iustin Pop
  def CheckPrereq(self):
1694 a8083063 Iustin Pop
    """Check prerequisites.
1695 a8083063 Iustin Pop

1696 a8083063 Iustin Pop
    This checks:
1697 a8083063 Iustin Pop
     - the new node is not already in the config
1698 a8083063 Iustin Pop
     - it is resolvable
1699 a8083063 Iustin Pop
     - its parameters (single/dual homed) matches the cluster
1700 a8083063 Iustin Pop

1701 a8083063 Iustin Pop
    Any errors are signalled by raising errors.OpPrereqError.
1702 a8083063 Iustin Pop

1703 a8083063 Iustin Pop
    """
1704 a8083063 Iustin Pop
    node_name = self.op.node_name
1705 a8083063 Iustin Pop
    cfg = self.cfg
1706 a8083063 Iustin Pop
1707 89e1fc26 Iustin Pop
    dns_data = utils.HostInfo(node_name)
1708 a8083063 Iustin Pop
1709 bcf043c9 Iustin Pop
    node = dns_data.name
1710 bcf043c9 Iustin Pop
    primary_ip = self.op.primary_ip = dns_data.ip
1711 a8083063 Iustin Pop
    secondary_ip = getattr(self.op, "secondary_ip", None)
1712 a8083063 Iustin Pop
    if secondary_ip is None:
1713 a8083063 Iustin Pop
      secondary_ip = primary_ip
1714 a8083063 Iustin Pop
    if not utils.IsValidIP(secondary_ip):
1715 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("Invalid secondary IP given")
1716 a8083063 Iustin Pop
    self.op.secondary_ip = secondary_ip
1717 e7c6e02b Michael Hanselmann
1718 a8083063 Iustin Pop
    node_list = cfg.GetNodeList()
1719 e7c6e02b Michael Hanselmann
    if not self.op.readd and node in node_list:
1720 e7c6e02b Michael Hanselmann
      raise errors.OpPrereqError("Node %s is already in the configuration" %
1721 e7c6e02b Michael Hanselmann
                                 node)
1722 e7c6e02b Michael Hanselmann
    elif self.op.readd and node not in node_list:
1723 e7c6e02b Michael Hanselmann
      raise errors.OpPrereqError("Node %s is not in the configuration" % node)
1724 a8083063 Iustin Pop
1725 a8083063 Iustin Pop
    for existing_node_name in node_list:
1726 a8083063 Iustin Pop
      existing_node = cfg.GetNodeInfo(existing_node_name)
1727 e7c6e02b Michael Hanselmann
1728 e7c6e02b Michael Hanselmann
      if self.op.readd and node == existing_node_name:
1729 e7c6e02b Michael Hanselmann
        if (existing_node.primary_ip != primary_ip or
1730 e7c6e02b Michael Hanselmann
            existing_node.secondary_ip != secondary_ip):
1731 e7c6e02b Michael Hanselmann
          raise errors.OpPrereqError("Readded node doesn't have the same IP"
1732 e7c6e02b Michael Hanselmann
                                     " address configuration as before")
1733 e7c6e02b Michael Hanselmann
        continue
1734 e7c6e02b Michael Hanselmann
1735 a8083063 Iustin Pop
      if (existing_node.primary_ip == primary_ip or
1736 a8083063 Iustin Pop
          existing_node.secondary_ip == primary_ip or
1737 a8083063 Iustin Pop
          existing_node.primary_ip == secondary_ip or
1738 a8083063 Iustin Pop
          existing_node.secondary_ip == secondary_ip):
1739 3ecf6786 Iustin Pop
        raise errors.OpPrereqError("New node ip address(es) conflict with"
1740 3ecf6786 Iustin Pop
                                   " existing node %s" % existing_node.name)
1741 a8083063 Iustin Pop
1742 a8083063 Iustin Pop
    # check that the type of the node (single versus dual homed) is the
1743 a8083063 Iustin Pop
    # same as for the master
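    # ("single homed" here means the secondary IP equals the primary IP)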
1744 880478f8 Iustin Pop
    myself = cfg.GetNodeInfo(self.sstore.GetMasterNode())
1745 a8083063 Iustin Pop
    master_singlehomed = myself.secondary_ip == myself.primary_ip
1746 a8083063 Iustin Pop
    newbie_singlehomed = secondary_ip == primary_ip
1747 a8083063 Iustin Pop
    if master_singlehomed != newbie_singlehomed:
1748 a8083063 Iustin Pop
      if master_singlehomed:
1749 3ecf6786 Iustin Pop
        raise errors.OpPrereqError("The master has no private ip but the"
1750 3ecf6786 Iustin Pop
                                   " new node has one")
1751 a8083063 Iustin Pop
      else:
1752 3ecf6786 Iustin Pop
        raise errors.OpPrereqError("The master has a private ip but the"
1753 3ecf6786 Iustin Pop
                                   " new node doesn't have one")
1754 a8083063 Iustin Pop
1755 a8083063 Iustin Pop
    # check reachability
1756 b15d625f Iustin Pop
    if not utils.TcpPing(primary_ip, constants.DEFAULT_NODED_PORT):
1757 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("Node not reachable by ping")
1758 a8083063 Iustin Pop
1759 a8083063 Iustin Pop
    if not newbie_singlehomed:
1760 a8083063 Iustin Pop
      # check reachability from my secondary ip to newbie's secondary ip
1761 b15d625f Iustin Pop
      if not utils.TcpPing(secondary_ip, constants.DEFAULT_NODED_PORT,
1762 b15d625f Iustin Pop
                           source=myself.secondary_ip):
1763 f4bc1f2c Michael Hanselmann
        raise errors.OpPrereqError("Node secondary ip not reachable by TCP"
1764 f4bc1f2c Michael Hanselmann
                                   " based ping to noded port")
1765 a8083063 Iustin Pop
1766 a8083063 Iustin Pop
    self.new_node = objects.Node(name=node,
1767 a8083063 Iustin Pop
                                 primary_ip=primary_ip,
1768 a8083063 Iustin Pop
                                 secondary_ip=secondary_ip)
1769 a8083063 Iustin Pop
1770 2a6469d5 Alexander Schreiber
    if self.sstore.GetHypervisorType() == constants.HT_XEN_HVM31:
1771 2a6469d5 Alexander Schreiber
      if not os.path.exists(constants.VNC_PASSWORD_FILE):
1772 2a6469d5 Alexander Schreiber
        raise errors.OpPrereqError("Cluster VNC password file %s missing" %
1773 2a6469d5 Alexander Schreiber
                                   constants.VNC_PASSWORD_FILE)
1774 2a6469d5 Alexander Schreiber
1775 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
1776 a8083063 Iustin Pop
    """Adds the new node to the cluster.
1777 a8083063 Iustin Pop

1778 a8083063 Iustin Pop
    """
1779 a8083063 Iustin Pop
    new_node = self.new_node
1780 a8083063 Iustin Pop
    node = new_node.name
1781 a8083063 Iustin Pop
1782 a8083063 Iustin Pop
    # set up inter-node password and certificate and restart the node daemon
1783 a8083063 Iustin Pop
    gntpass = self.sstore.GetNodeDaemonPassword()
1784 a8083063 Iustin Pop
    if not re.match('^[a-zA-Z0-9.]{1,64}$', gntpass):
1785 3ecf6786 Iustin Pop
      raise errors.OpExecError("ganeti password corruption detected")
1786 a8083063 Iustin Pop
    f = open(constants.SSL_CERT_FILE)
1787 a8083063 Iustin Pop
    try:
1788 a8083063 Iustin Pop
      gntpem = f.read(8192)
1789 a8083063 Iustin Pop
    finally:
1790 a8083063 Iustin Pop
      f.close()
1791 a8083063 Iustin Pop
    # in the base64 pem encoding, neither '!' nor '.' are valid chars,
1792 a8083063 Iustin Pop
    # so we use this to detect an invalid certificate; as long as the
1793 a8083063 Iustin Pop
    # cert doesn't contain this, the here-document will be correctly
1794 a8083063 Iustin Pop
    # parsed by the shell sequence below
1795 a8083063 Iustin Pop
    if re.search('^!EOF\.', gntpem, re.MULTILINE):
1796 3ecf6786 Iustin Pop
      raise errors.OpExecError("invalid PEM encoding in the SSL certificate")
1797 a8083063 Iustin Pop
    if not gntpem.endswith("\n"):
1798 3ecf6786 Iustin Pop
      raise errors.OpExecError("PEM must end with newline")
1799 a8083063 Iustin Pop
    logger.Info("copy cluster pass to %s and starting the node daemon" % node)
1800 a8083063 Iustin Pop
1801 a8083063 Iustin Pop
    # and then connect with ssh to set password and start ganeti-noded
1802 a8083063 Iustin Pop
    # note that all the below variables are sanitized at this point,
1803 a8083063 Iustin Pop
    # either by being constants or by the checks above
1804 a8083063 Iustin Pop
    ss = self.sstore
1805 a8083063 Iustin Pop
    mycommand = ("umask 077 && "
1806 a8083063 Iustin Pop
                 "echo '%s' > '%s' && "
1807 a8083063 Iustin Pop
                 "cat > '%s' << '!EOF.' && \n"
1808 a8083063 Iustin Pop
                 "%s!EOF.\n%s restart" %
1809 a8083063 Iustin Pop
                 (gntpass, ss.KeyToFilename(ss.SS_NODED_PASS),
1810 a8083063 Iustin Pop
                  constants.SSL_CERT_FILE, gntpem,
1811 a8083063 Iustin Pop
                  constants.NODE_INITD_SCRIPT))
1812 a8083063 Iustin Pop
1813 c92b310a Michael Hanselmann
    result = self.ssh.Run(node, 'root', mycommand, batch=False, ask_key=True)
1814 a8083063 Iustin Pop
    if result.failed:
1815 3ecf6786 Iustin Pop
      raise errors.OpExecError("Remote command on node %s, error: %s,"
1816 3ecf6786 Iustin Pop
                               " output: %s" %
1817 3ecf6786 Iustin Pop
                               (node, result.fail_reason, result.output))
1818 a8083063 Iustin Pop
1819 a8083063 Iustin Pop
    # check connectivity
1820 a8083063 Iustin Pop
    time.sleep(4)
1821 a8083063 Iustin Pop
1822 a8083063 Iustin Pop
    result = rpc.call_version([node])[node]
1823 a8083063 Iustin Pop
    if result:
1824 a8083063 Iustin Pop
      if constants.PROTOCOL_VERSION == result:
1825 a8083063 Iustin Pop
        logger.Info("communication to node %s fine, sw version %s match" %
1826 a8083063 Iustin Pop
                    (node, result))
1827 a8083063 Iustin Pop
      else:
1828 3ecf6786 Iustin Pop
        raise errors.OpExecError("Version mismatch master version %s,"
1829 3ecf6786 Iustin Pop
                                 " node version %s" %
1830 3ecf6786 Iustin Pop
                                 (constants.PROTOCOL_VERSION, result))
1831 a8083063 Iustin Pop
    else:
1832 3ecf6786 Iustin Pop
      raise errors.OpExecError("Cannot get version from the new node")
1833 a8083063 Iustin Pop
1834 a8083063 Iustin Pop
    # setup ssh on node
1835 a8083063 Iustin Pop
    logger.Info("copy ssh key to node %s" % node)
1836 70d9e3d8 Iustin Pop
    priv_key, pub_key, _ = ssh.GetUserFiles(constants.GANETI_RUNAS)
1837 a8083063 Iustin Pop
    keyarray = []
1838 70d9e3d8 Iustin Pop
    keyfiles = [constants.SSH_HOST_DSA_PRIV, constants.SSH_HOST_DSA_PUB,
1839 70d9e3d8 Iustin Pop
                constants.SSH_HOST_RSA_PRIV, constants.SSH_HOST_RSA_PUB,
1840 70d9e3d8 Iustin Pop
                priv_key, pub_key]
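    # these files are read locally and their contents pushed to the new
    # node via the node_add RPC call below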
1841 a8083063 Iustin Pop
1842 a8083063 Iustin Pop
    for i in keyfiles:
1843 a8083063 Iustin Pop
      f = open(i, 'r')
1844 a8083063 Iustin Pop
      try:
1845 a8083063 Iustin Pop
        keyarray.append(f.read())
1846 a8083063 Iustin Pop
      finally:
1847 a8083063 Iustin Pop
        f.close()
1848 a8083063 Iustin Pop
1849 a8083063 Iustin Pop
    result = rpc.call_node_add(node, keyarray[0], keyarray[1], keyarray[2],
1850 a8083063 Iustin Pop
                               keyarray[3], keyarray[4], keyarray[5])
1851 a8083063 Iustin Pop
1852 a8083063 Iustin Pop
    if not result:
1853 3ecf6786 Iustin Pop
      raise errors.OpExecError("Cannot transfer ssh keys to the new node")
1854 a8083063 Iustin Pop
1855 a8083063 Iustin Pop
    # Add node to our /etc/hosts, and add key to known_hosts
1856 9440aeab Michael Hanselmann
    _AddHostToEtcHosts(new_node.name)
1857 c8a0948f Michael Hanselmann
1858 a8083063 Iustin Pop
    if new_node.secondary_ip != new_node.primary_ip:
1859 16abfbc2 Alexander Schreiber
      if not rpc.call_node_tcp_ping(new_node.name,
1860 16abfbc2 Alexander Schreiber
                                    constants.LOCALHOST_IP_ADDRESS,
1861 16abfbc2 Alexander Schreiber
                                    new_node.secondary_ip,
1862 16abfbc2 Alexander Schreiber
                                    constants.DEFAULT_NODED_PORT,
1863 16abfbc2 Alexander Schreiber
                                    10, False):
1864 f4bc1f2c Michael Hanselmann
        raise errors.OpExecError("Node claims it doesn't have the secondary ip"
1865 f4bc1f2c Michael Hanselmann
                                 " you gave (%s). Please fix and re-run this"
1866 f4bc1f2c Michael Hanselmann
                                 " command." % new_node.secondary_ip)
1867 a8083063 Iustin Pop
1868 c92b310a Michael Hanselmann
    success, msg = self.ssh.VerifyNodeHostname(node)
1869 ff98055b Iustin Pop
    if not success:
1870 ff98055b Iustin Pop
      raise errors.OpExecError("Node '%s' claims it has a different hostname"
1871 f4bc1f2c Michael Hanselmann
                               " than the one the resolver gives: %s."
1872 f4bc1f2c Michael Hanselmann
                               " Please fix and re-run this command." %
1873 ff98055b Iustin Pop
                               (node, msg))
1874 ff98055b Iustin Pop
1875 a8083063 Iustin Pop
    # Distribute updated /etc/hosts and known_hosts to all nodes,
1876 a8083063 Iustin Pop
    # including the node just added
1877 880478f8 Iustin Pop
    myself = self.cfg.GetNodeInfo(self.sstore.GetMasterNode())
1878 102b115b Michael Hanselmann
    dist_nodes = self.cfg.GetNodeList()
1879 102b115b Michael Hanselmann
    if not self.op.readd:
1880 102b115b Michael Hanselmann
      dist_nodes.append(node)
1881 a8083063 Iustin Pop
    if myself.name in dist_nodes:
1882 a8083063 Iustin Pop
      dist_nodes.remove(myself.name)
1883 a8083063 Iustin Pop
1884 a8083063 Iustin Pop
    logger.Debug("Copying hosts and known_hosts to all nodes")
1885 107711b0 Michael Hanselmann
    for fname in (constants.ETC_HOSTS, constants.SSH_KNOWN_HOSTS_FILE):
1886 a8083063 Iustin Pop
      result = rpc.call_upload_file(dist_nodes, fname)
1887 a8083063 Iustin Pop
      for to_node in dist_nodes:
1888 a8083063 Iustin Pop
        if not result[to_node]:
1889 a8083063 Iustin Pop
          logger.Error("copy of file %s to node %s failed" %
1890 a8083063 Iustin Pop
                       (fname, to_node))
1891 a8083063 Iustin Pop
1892 cb91d46e Iustin Pop
    to_copy = ss.GetFileList()
1893 2a6469d5 Alexander Schreiber
    if self.sstore.GetHypervisorType() == constants.HT_XEN_HVM31:
1894 2a6469d5 Alexander Schreiber
      to_copy.append(constants.VNC_PASSWORD_FILE)
1895 a8083063 Iustin Pop
    for fname in to_copy:
1896 c92b310a Michael Hanselmann
      if not self.ssh.CopyFileToNode(node, fname):
1897 a8083063 Iustin Pop
        logger.Error("could not copy file %s to node %s" % (fname, node))
1898 a8083063 Iustin Pop
1899 e7c6e02b Michael Hanselmann
    if not self.op.readd:
1900 e7c6e02b Michael Hanselmann
      logger.Info("adding node %s to cluster.conf" % node)
1901 e7c6e02b Michael Hanselmann
      self.cfg.AddNode(new_node)
1902 a8083063 Iustin Pop
1903 a8083063 Iustin Pop
1904 a8083063 Iustin Pop
class LUMasterFailover(LogicalUnit):
1905 a8083063 Iustin Pop
  """Failover the master node to the current node.
1906 a8083063 Iustin Pop

1907 a8083063 Iustin Pop
  This is a special LU in that it must run on a non-master node.
1908 a8083063 Iustin Pop

1909 a8083063 Iustin Pop
  """
1910 a8083063 Iustin Pop
  HPATH = "master-failover"
1911 a8083063 Iustin Pop
  HTYPE = constants.HTYPE_CLUSTER
1912 a8083063 Iustin Pop
  REQ_MASTER = False
1913 a8083063 Iustin Pop
  _OP_REQP = []
1914 a8083063 Iustin Pop
1915 a8083063 Iustin Pop
  def BuildHooksEnv(self):
1916 a8083063 Iustin Pop
    """Build hooks env.
1917 a8083063 Iustin Pop

1918 a8083063 Iustin Pop
    This will run on the new master only in the pre phase, and on all
1919 a8083063 Iustin Pop
    the nodes in the post phase.
1920 a8083063 Iustin Pop

1921 a8083063 Iustin Pop
    """
1922 a8083063 Iustin Pop
    env = {
1923 0e137c28 Iustin Pop
      "OP_TARGET": self.new_master,
1924 a8083063 Iustin Pop
      "NEW_MASTER": self.new_master,
1925 a8083063 Iustin Pop
      "OLD_MASTER": self.old_master,
1926 a8083063 Iustin Pop
      }
1927 a8083063 Iustin Pop
    return env, [self.new_master], self.cfg.GetNodeList()
1928 a8083063 Iustin Pop
1929 a8083063 Iustin Pop
  def CheckPrereq(self):
1930 a8083063 Iustin Pop
    """Check prerequisites.
1931 a8083063 Iustin Pop

1932 a8083063 Iustin Pop
    This checks that we are not already the master.
1933 a8083063 Iustin Pop

1934 a8083063 Iustin Pop
    """
1935 89e1fc26 Iustin Pop
    self.new_master = utils.HostInfo().name
1936 880478f8 Iustin Pop
    self.old_master = self.sstore.GetMasterNode()
1937 a8083063 Iustin Pop
1938 a8083063 Iustin Pop
    if self.old_master == self.new_master:
1939 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("This commands must be run on the node"
1940 f4bc1f2c Michael Hanselmann
                                 " where you want the new master to be."
1941 f4bc1f2c Michael Hanselmann
                                 " %s is already the master" %
1942 3ecf6786 Iustin Pop
                                 self.old_master)
1943 a8083063 Iustin Pop
1944 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
1945 a8083063 Iustin Pop
    """Failover the master node.
1946 a8083063 Iustin Pop

1947 a8083063 Iustin Pop
    This command, when run on a non-master node, will cause the current
1948 a8083063 Iustin Pop
    master to cease being master, and the non-master to become new
1949 a8083063 Iustin Pop
    master.
1950 a8083063 Iustin Pop

1951 a8083063 Iustin Pop
    """
1952 a8083063 Iustin Pop
    #TODO: do not rely on gethostname returning the FQDN
1953 a8083063 Iustin Pop
    logger.Info("setting master to %s, old master: %s" %
1954 a8083063 Iustin Pop
                (self.new_master, self.old_master))
1955 a8083063 Iustin Pop
1956 a8083063 Iustin Pop
    if not rpc.call_node_stop_master(self.old_master):
1957 a8083063 Iustin Pop
      logger.Error("could disable the master role on the old master"
1958 a8083063 Iustin Pop
                   " %s, please disable manually" % self.old_master)
1959 a8083063 Iustin Pop
1960 880478f8 Iustin Pop
    ss = self.sstore
1961 880478f8 Iustin Pop
    ss.SetKey(ss.SS_MASTER_NODE, self.new_master)
1962 880478f8 Iustin Pop
    if not rpc.call_upload_file(self.cfg.GetNodeList(),
1963 880478f8 Iustin Pop
                                ss.KeyToFilename(ss.SS_MASTER_NODE)):
1964 880478f8 Iustin Pop
      logger.Error("could not distribute the new simple store master file"
1965 880478f8 Iustin Pop
                   " to the other nodes, please check.")
1966 880478f8 Iustin Pop
1967 a8083063 Iustin Pop
    if not rpc.call_node_start_master(self.new_master):
1968 a8083063 Iustin Pop
      logger.Error("could not start the master role on the new master"
1969 a8083063 Iustin Pop
                   " %s, please check" % self.new_master)
1970 f4bc1f2c Michael Hanselmann
      feedback_fn("Error in activating the master IP on the new master,"
1971 f4bc1f2c Michael Hanselmann
                  " please fix manually.")
1972 a8083063 Iustin Pop
1973 a8083063 Iustin Pop
1974 a8083063 Iustin Pop
1975 a8083063 Iustin Pop
class LUQueryClusterInfo(NoHooksLU):
1976 a8083063 Iustin Pop
  """Query cluster configuration.
1977 a8083063 Iustin Pop

1978 a8083063 Iustin Pop
  """
1979 a8083063 Iustin Pop
  _OP_REQP = []
1980 59322403 Iustin Pop
  REQ_MASTER = False
1981 a8083063 Iustin Pop
1982 a8083063 Iustin Pop
  def CheckPrereq(self):
1983 a8083063 Iustin Pop
    """No prerequsites needed for this LU.
1984 a8083063 Iustin Pop

1985 a8083063 Iustin Pop
    """
1986 a8083063 Iustin Pop
    pass
1987 a8083063 Iustin Pop
1988 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
1989 a8083063 Iustin Pop
    """Return cluster config.
1990 a8083063 Iustin Pop

1991 a8083063 Iustin Pop
    """
1992 a8083063 Iustin Pop
    result = {
1993 5fcdc80d Iustin Pop
      "name": self.sstore.GetClusterName(),
1994 a8083063 Iustin Pop
      "software_version": constants.RELEASE_VERSION,
1995 a8083063 Iustin Pop
      "protocol_version": constants.PROTOCOL_VERSION,
1996 a8083063 Iustin Pop
      "config_version": constants.CONFIG_VERSION,
1997 a8083063 Iustin Pop
      "os_api_version": constants.OS_API_VERSION,
1998 a8083063 Iustin Pop
      "export_version": constants.EXPORT_VERSION,
1999 880478f8 Iustin Pop
      "master": self.sstore.GetMasterNode(),
2000 a8083063 Iustin Pop
      "architecture": (platform.architecture()[0], platform.machine()),
2001 8a12ce45 Iustin Pop
      "hypervisor_type": self.sstore.GetHypervisorType(),
2002 a8083063 Iustin Pop
      }
2003 a8083063 Iustin Pop
2004 a8083063 Iustin Pop
    return result
2005 a8083063 Iustin Pop
2006 a8083063 Iustin Pop
2007 a8083063 Iustin Pop
class LUClusterCopyFile(NoHooksLU):
2008 a8083063 Iustin Pop
  """Copy file to cluster.
2009 a8083063 Iustin Pop

2010 a8083063 Iustin Pop
  """
2011 a8083063 Iustin Pop
  _OP_REQP = ["nodes", "filename"]
2012 a8083063 Iustin Pop
2013 a8083063 Iustin Pop
  def CheckPrereq(self):
2014 a8083063 Iustin Pop
    """Check prerequisites.
2015 a8083063 Iustin Pop

2016 a8083063 Iustin Pop
    It should check that the named file exists and that the given list
2017 a8083063 Iustin Pop
    of nodes is valid.
2018 a8083063 Iustin Pop

2019 a8083063 Iustin Pop
    """
2020 a8083063 Iustin Pop
    if not os.path.exists(self.op.filename):
2021 a8083063 Iustin Pop
      raise errors.OpPrereqError("No such filename '%s'" % self.op.filename)
2022 dcb93971 Michael Hanselmann
2023 dcb93971 Michael Hanselmann
    self.nodes = _GetWantedNodes(self, self.op.nodes)
2024 a8083063 Iustin Pop
2025 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
2026 a8083063 Iustin Pop
    """Copy a file from master to some nodes.
2027 a8083063 Iustin Pop

2028 a8083063 Iustin Pop
    The file given by self.op.filename is copied from the master to every
    node in self.nodes, skipping the master itself.
2033 a8083063 Iustin Pop

2034 a8083063 Iustin Pop
    """
2035 a8083063 Iustin Pop
    filename = self.op.filename
2036 a8083063 Iustin Pop
2037 89e1fc26 Iustin Pop
    myname = utils.HostInfo().name
2038 a8083063 Iustin Pop
2039 a7ba5e53 Iustin Pop
    for node in self.nodes:
2040 a8083063 Iustin Pop
      if node == myname:
2041 a8083063 Iustin Pop
        continue
2042 c92b310a Michael Hanselmann
      if not self.ssh.CopyFileToNode(node, filename):
2043 a8083063 Iustin Pop
        logger.Error("Copy of file %s to node %s failed" % (filename, node))
2044 a8083063 Iustin Pop
2045 a8083063 Iustin Pop
2046 a8083063 Iustin Pop
class LUDumpClusterConfig(NoHooksLU):
2047 a8083063 Iustin Pop
  """Return a text-representation of the cluster-config.
2048 a8083063 Iustin Pop

2049 a8083063 Iustin Pop
  """
2050 a8083063 Iustin Pop
  _OP_REQP = []
2051 a8083063 Iustin Pop
2052 a8083063 Iustin Pop
  def CheckPrereq(self):
2053 a8083063 Iustin Pop
    """No prerequisites.
2054 a8083063 Iustin Pop

2055 a8083063 Iustin Pop
    """
2056 a8083063 Iustin Pop
    pass
2057 a8083063 Iustin Pop
2058 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
2059 a8083063 Iustin Pop
    """Dump a representation of the cluster config to the standard output.
2060 a8083063 Iustin Pop

2061 a8083063 Iustin Pop
    """
2062 a8083063 Iustin Pop
    return self.cfg.DumpConfig()
2063 a8083063 Iustin Pop
2064 a8083063 Iustin Pop
2065 a8083063 Iustin Pop
class LURunClusterCommand(NoHooksLU):
2066 a8083063 Iustin Pop
  """Run a command on some nodes.
2067 a8083063 Iustin Pop

2068 a8083063 Iustin Pop
  """
2069 a8083063 Iustin Pop
  _OP_REQP = ["command", "nodes"]
2070 a8083063 Iustin Pop
2071 a8083063 Iustin Pop
  def CheckPrereq(self):
2072 a8083063 Iustin Pop
    """Check prerequisites.
2073 a8083063 Iustin Pop

2074 a8083063 Iustin Pop
    It checks that the given list of nodes is valid.
2075 a8083063 Iustin Pop

2076 a8083063 Iustin Pop
    """
2077 dcb93971 Michael Hanselmann
    self.nodes = _GetWantedNodes(self, self.op.nodes)
2078 a8083063 Iustin Pop
2079 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
2080 a8083063 Iustin Pop
    """Run a command on some nodes.
2081 a8083063 Iustin Pop

2082 a8083063 Iustin Pop
    """
2083 5f83e263 Iustin Pop
    # put the master at the end of the nodes list
2084 5f83e263 Iustin Pop
    master_node = self.sstore.GetMasterNode()
2085 5f83e263 Iustin Pop
    if master_node in self.nodes:
2086 5f83e263 Iustin Pop
      self.nodes.remove(master_node)
2087 5f83e263 Iustin Pop
      self.nodes.append(master_node)
2088 5f83e263 Iustin Pop
2089 a8083063 Iustin Pop
    data = []
2090 a8083063 Iustin Pop
    for node in self.nodes:
2091 c92b310a Michael Hanselmann
      result = self.ssh.Run(node, "root", self.op.command)
2092 a7ba5e53 Iustin Pop
      data.append((node, result.output, result.exit_code))
2093 a8083063 Iustin Pop
2094 a8083063 Iustin Pop
    return data
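# Illustrative sketch (comment added for clarity, not part of the original
# code): callers of this LU get back a list of (node, output, exit_code)
# tuples and would typically walk it roughly like this:
#
#   for node, output, exit_code in results:
#     logger.Info("node %s returned %d:\n%s" % (node, exit_code, output))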
2095 a8083063 Iustin Pop
2096 a8083063 Iustin Pop
2097 a8083063 Iustin Pop
class LUActivateInstanceDisks(NoHooksLU):
2098 a8083063 Iustin Pop
  """Bring up an instance's disks.
2099 a8083063 Iustin Pop

2100 a8083063 Iustin Pop
  """
2101 a8083063 Iustin Pop
  _OP_REQP = ["instance_name"]
2102 a8083063 Iustin Pop
2103 a8083063 Iustin Pop
  def CheckPrereq(self):
2104 a8083063 Iustin Pop
    """Check prerequisites.
2105 a8083063 Iustin Pop

2106 a8083063 Iustin Pop
    This checks that the instance is in the cluster.
2107 a8083063 Iustin Pop

2108 a8083063 Iustin Pop
    """
2109 a8083063 Iustin Pop
    instance = self.cfg.GetInstanceInfo(
2110 a8083063 Iustin Pop
      self.cfg.ExpandInstanceName(self.op.instance_name))
2111 a8083063 Iustin Pop
    if instance is None:
2112 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("Instance '%s' not known" %
2113 3ecf6786 Iustin Pop
                                 self.op.instance_name)
2114 a8083063 Iustin Pop
    self.instance = instance
2115 a8083063 Iustin Pop
2116 a8083063 Iustin Pop
2117 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
2118 a8083063 Iustin Pop
    """Activate the disks.
2119 a8083063 Iustin Pop

2120 a8083063 Iustin Pop
    """
2121 a8083063 Iustin Pop
    disks_ok, disks_info = _AssembleInstanceDisks(self.instance, self.cfg)
2122 a8083063 Iustin Pop
    if not disks_ok:
2123 3ecf6786 Iustin Pop
      raise errors.OpExecError("Cannot activate block devices")
2124 a8083063 Iustin Pop
2125 a8083063 Iustin Pop
    return disks_info
2126 a8083063 Iustin Pop
2127 a8083063 Iustin Pop
2128 a8083063 Iustin Pop
def _AssembleInstanceDisks(instance, cfg, ignore_secondaries=False):
2129 a8083063 Iustin Pop
  """Prepare the block devices for an instance.
2130 a8083063 Iustin Pop

2131 a8083063 Iustin Pop
  This sets up the block devices on all nodes.
2132 a8083063 Iustin Pop

2133 a8083063 Iustin Pop
  Args:
2134 a8083063 Iustin Pop
    instance: a ganeti.objects.Instance object
2135 a8083063 Iustin Pop
    ignore_secondaries: if true, errors on secondary nodes won't result
2136 a8083063 Iustin Pop
                        in an error return from the function
2137 a8083063 Iustin Pop

2138 a8083063 Iustin Pop
  Returns:
2139 a8083063 Iustin Pop
    a tuple (disks_ok, device_info): disks_ok is false if the operation
    failed on any required node, and device_info is the list of
    (host, instance_visible_name, node_visible_name) tuples giving the
    mapping from node devices to instance devices
2142 a8083063 Iustin Pop
  """
2143 a8083063 Iustin Pop
  device_info = []
2144 a8083063 Iustin Pop
  disks_ok = True
2145 fdbd668d Iustin Pop
  iname = instance.name
2146 fdbd668d Iustin Pop
  # With the two-pass mechanism we try to reduce the window of
2147 fdbd668d Iustin Pop
  # opportunity for the race condition of switching DRBD to primary
2148 fdbd668d Iustin Pop
  # before handshaking has occurred, but we do not eliminate it
2149 fdbd668d Iustin Pop
2150 fdbd668d Iustin Pop
  # The proper fix would be to wait (with some limits) until the
2151 fdbd668d Iustin Pop
  # connection has been made and drbd transitions from WFConnection
2152 fdbd668d Iustin Pop
  # into any other network-connected state (Connected, SyncTarget,
2153 fdbd668d Iustin Pop
  # SyncSource, etc.)
2154 fdbd668d Iustin Pop
2155 fdbd668d Iustin Pop
  # 1st pass, assemble on all nodes in secondary mode
2156 a8083063 Iustin Pop
  for inst_disk in instance.disks:
2157 a8083063 Iustin Pop
    for node, node_disk in inst_disk.ComputeNodeTree(instance.primary_node):
2158 a8083063 Iustin Pop
      cfg.SetDiskID(node_disk, node)
2159 fdbd668d Iustin Pop
      result = rpc.call_blockdev_assemble(node, node_disk, iname, False)
2160 a8083063 Iustin Pop
      if not result:
2161 f4bc1f2c Michael Hanselmann
        logger.Error("could not prepare block device %s on node %s"
2162 fdbd668d Iustin Pop
                     " (is_primary=False, pass=1)" % (inst_disk.iv_name, node))
2163 fdbd668d Iustin Pop
        if not ignore_secondaries:
2164 a8083063 Iustin Pop
          disks_ok = False
2165 fdbd668d Iustin Pop
2166 fdbd668d Iustin Pop
  # FIXME: race condition on drbd migration to primary
2167 fdbd668d Iustin Pop
2168 fdbd668d Iustin Pop
  # 2nd pass, do only the primary node
2169 fdbd668d Iustin Pop
  for inst_disk in instance.disks:
2170 fdbd668d Iustin Pop
    for node, node_disk in inst_disk.ComputeNodeTree(instance.primary_node):
2171 fdbd668d Iustin Pop
      if node != instance.primary_node:
2172 fdbd668d Iustin Pop
        continue
2173 fdbd668d Iustin Pop
      cfg.SetDiskID(node_disk, node)
2174 fdbd668d Iustin Pop
      result = rpc.call_blockdev_assemble(node, node_disk, iname, True)
2175 fdbd668d Iustin Pop
      if not result:
2176 fdbd668d Iustin Pop
        logger.Error("could not prepare block device %s on node %s"
2177 fdbd668d Iustin Pop
                     " (is_primary=True, pass=2)" % (inst_disk.iv_name, node))
2178 fdbd668d Iustin Pop
        disks_ok = False
2179 fdbd668d Iustin Pop
    device_info.append((instance.primary_node, inst_disk.iv_name, result))
2180 a8083063 Iustin Pop
2181 b352ab5b Iustin Pop
  # leave the disks configured for the primary node
2182 b352ab5b Iustin Pop
  # this is a workaround that would be fixed better by
2183 b352ab5b Iustin Pop
  # improving the logical/physical id handling
2184 b352ab5b Iustin Pop
  for disk in instance.disks:
2185 b352ab5b Iustin Pop
    cfg.SetDiskID(disk, instance.primary_node)
2186 b352ab5b Iustin Pop
2187 a8083063 Iustin Pop
  return disks_ok, device_info
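# Illustrative sketch (not part of the original code): callers combine the two
# return values roughly like this, with `inst` an objects.Instance and `cfg` a
# config.ConfigWriter as elsewhere in this module:
#
#   disks_ok, device_info = _AssembleInstanceDisks(inst, cfg)
#   if not disks_ok:
#     raise errors.OpExecError("Cannot activate block devices")
#   for node, iv_name, node_dev in device_info:
#     logger.Info("disk %s is visible on %s as %s" % (iv_name, node, node_dev))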
2188 a8083063 Iustin Pop
2189 a8083063 Iustin Pop
2190 fe7b0351 Michael Hanselmann
def _StartInstanceDisks(cfg, instance, force):
2191 3ecf6786 Iustin Pop
  """Start the disks of an instance.
2192 3ecf6786 Iustin Pop

2193 3ecf6786 Iustin Pop
  """
2194 fe7b0351 Michael Hanselmann
  disks_ok, dummy = _AssembleInstanceDisks(instance, cfg,
2195 fe7b0351 Michael Hanselmann
                                           ignore_secondaries=force)
2196 fe7b0351 Michael Hanselmann
  if not disks_ok:
2197 fe7b0351 Michael Hanselmann
    _ShutdownInstanceDisks(instance, cfg)
2198 fe7b0351 Michael Hanselmann
    if force is not None and not force:
2199 fe7b0351 Michael Hanselmann
      logger.Error("If the message above refers to a secondary node,"
2200 fe7b0351 Michael Hanselmann
                   " you can retry the operation using '--force'.")
2201 3ecf6786 Iustin Pop
    raise errors.OpExecError("Disk consistency error")
2202 fe7b0351 Michael Hanselmann
2203 fe7b0351 Michael Hanselmann
2204 a8083063 Iustin Pop
class LUDeactivateInstanceDisks(NoHooksLU):
2205 a8083063 Iustin Pop
  """Shutdown an instance's disks.
2206 a8083063 Iustin Pop

2207 a8083063 Iustin Pop
  """
2208 a8083063 Iustin Pop
  _OP_REQP = ["instance_name"]
2209 a8083063 Iustin Pop
2210 a8083063 Iustin Pop
  def CheckPrereq(self):
2211 a8083063 Iustin Pop
    """Check prerequisites.
2212 a8083063 Iustin Pop

2213 a8083063 Iustin Pop
    This checks that the instance is in the cluster.
2214 a8083063 Iustin Pop

2215 a8083063 Iustin Pop
    """
2216 a8083063 Iustin Pop
    instance = self.cfg.GetInstanceInfo(
2217 a8083063 Iustin Pop
      self.cfg.ExpandInstanceName(self.op.instance_name))
2218 a8083063 Iustin Pop
    if instance is None:
2219 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("Instance '%s' not known" %
2220 3ecf6786 Iustin Pop
                                 self.op.instance_name)
2221 a8083063 Iustin Pop
    self.instance = instance
2222 a8083063 Iustin Pop
2223 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
2224 a8083063 Iustin Pop
    """Deactivate the disks
2225 a8083063 Iustin Pop

2226 a8083063 Iustin Pop
    """
2227 a8083063 Iustin Pop
    instance = self.instance
2228 a8083063 Iustin Pop
    ins_l = rpc.call_instance_list([instance.primary_node])
2229 a8083063 Iustin Pop
    ins_l = ins_l[instance.primary_node]
2230 a8083063 Iustin Pop
    if not type(ins_l) is list:
2231 3ecf6786 Iustin Pop
      raise errors.OpExecError("Can't contact node '%s'" %
2232 3ecf6786 Iustin Pop
                               instance.primary_node)
2233 a8083063 Iustin Pop
2234 a8083063 Iustin Pop
    if self.instance.name in ins_l:
2235 3ecf6786 Iustin Pop
      raise errors.OpExecError("Instance is running, can't shutdown"
2236 3ecf6786 Iustin Pop
                               " block devices.")
2237 a8083063 Iustin Pop
2238 a8083063 Iustin Pop
    _ShutdownInstanceDisks(instance, self.cfg)
2239 a8083063 Iustin Pop
2240 a8083063 Iustin Pop
2241 a8083063 Iustin Pop
def _ShutdownInstanceDisks(instance, cfg, ignore_primary=False):
2242 a8083063 Iustin Pop
  """Shutdown block devices of an instance.
2243 a8083063 Iustin Pop

2244 a8083063 Iustin Pop
  This does the shutdown on all nodes of the instance.
2245 a8083063 Iustin Pop

2246 a8083063 Iustin Pop
  If ignore_primary is true, errors on the primary node are ignored;
  otherwise any shutdown failure makes the function return an error.
2248 a8083063 Iustin Pop

2249 a8083063 Iustin Pop
  """
2250 a8083063 Iustin Pop
  result = True
2251 a8083063 Iustin Pop
  for disk in instance.disks:
2252 a8083063 Iustin Pop
    for node, top_disk in disk.ComputeNodeTree(instance.primary_node):
2253 a8083063 Iustin Pop
      cfg.SetDiskID(top_disk, node)
2254 a8083063 Iustin Pop
      if not rpc.call_blockdev_shutdown(node, top_disk):
2255 a8083063 Iustin Pop
        logger.Error("could not shutdown block device %s on node %s" %
2256 a8083063 Iustin Pop
                     (disk.iv_name, node))
2257 a8083063 Iustin Pop
        if not ignore_primary or node != instance.primary_node:
2258 a8083063 Iustin Pop
          result = False
2259 a8083063 Iustin Pop
  return result
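# Illustrative sketch (not in the original code): tearing down an instance's
# disks while tolerating a dead primary node, as the failover code further
# below does; `inst` and `cfg` are the usual Instance and ConfigWriter:
#
#   if not _ShutdownInstanceDisks(inst, cfg, ignore_primary=True):
#     logger.Error("some block devices could not be shut down")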
2260 a8083063 Iustin Pop
2261 a8083063 Iustin Pop
2262 d4f16fd9 Iustin Pop
def _CheckNodeFreeMemory(cfg, node, reason, requested):
2263 d4f16fd9 Iustin Pop
  """Checks if a node has enough free memory.
2264 d4f16fd9 Iustin Pop

2265 d4f16fd9 Iustin Pop
  This function checks if a given node has the needed amount of free
2266 d4f16fd9 Iustin Pop
  memory. In case the node has less memory or we cannot get the
2267 d4f16fd9 Iustin Pop
  information from the node, this function raises an OpPrereqError
2268 d4f16fd9 Iustin Pop
  exception.
2269 d4f16fd9 Iustin Pop

2270 d4f16fd9 Iustin Pop
  Args:
2271 d4f16fd9 Iustin Pop
    - cfg: a ConfigWriter instance
2272 d4f16fd9 Iustin Pop
    - node: the node name
2273 d4f16fd9 Iustin Pop
    - reason: string to use in the error message
2274 d4f16fd9 Iustin Pop
    - requested: the amount of memory in MiB
2275 d4f16fd9 Iustin Pop

2276 d4f16fd9 Iustin Pop
  """
2277 d4f16fd9 Iustin Pop
  nodeinfo = rpc.call_node_info([node], cfg.GetVGName())
2278 d4f16fd9 Iustin Pop
  if not nodeinfo or not isinstance(nodeinfo, dict):
2279 d4f16fd9 Iustin Pop
    raise errors.OpPrereqError("Could not contact node %s for resource"
2280 d4f16fd9 Iustin Pop
                             " information" % (node,))
2281 d4f16fd9 Iustin Pop
2282 d4f16fd9 Iustin Pop
  free_mem = nodeinfo[node].get('memory_free')
2283 d4f16fd9 Iustin Pop
  if not isinstance(free_mem, int):
2284 d4f16fd9 Iustin Pop
    raise errors.OpPrereqError("Can't compute free memory on node %s, result"
2285 d4f16fd9 Iustin Pop
                             " was '%s'" % (node, free_mem))
2286 d4f16fd9 Iustin Pop
  if requested > free_mem:
2287 d4f16fd9 Iustin Pop
    raise errors.OpPrereqError("Not enough memory on node %s for %s:"
2288 d4f16fd9 Iustin Pop
                             " needed %s MiB, available %s MiB" %
2289 d4f16fd9 Iustin Pop
                             (node, reason, requested, free_mem))
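# Usage sketch (added for illustration): guarding an operation against memory
# exhaustion on the target node, in the style of the LUs below:
#
#   _CheckNodeFreeMemory(self.cfg, instance.primary_node,
#                        "starting instance %s" % instance.name,
#                        instance.memory)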
2290 d4f16fd9 Iustin Pop
2291 d4f16fd9 Iustin Pop
2292 a8083063 Iustin Pop
class LUStartupInstance(LogicalUnit):
2293 a8083063 Iustin Pop
  """Starts an instance.
2294 a8083063 Iustin Pop

2295 a8083063 Iustin Pop
  """
2296 a8083063 Iustin Pop
  HPATH = "instance-start"
2297 a8083063 Iustin Pop
  HTYPE = constants.HTYPE_INSTANCE
2298 a8083063 Iustin Pop
  _OP_REQP = ["instance_name", "force"]
2299 a8083063 Iustin Pop
2300 a8083063 Iustin Pop
  def BuildHooksEnv(self):
2301 a8083063 Iustin Pop
    """Build hooks env.
2302 a8083063 Iustin Pop

2303 a8083063 Iustin Pop
    This runs on master, primary and secondary nodes of the instance.
2304 a8083063 Iustin Pop

2305 a8083063 Iustin Pop
    """
2306 a8083063 Iustin Pop
    env = {
2307 a8083063 Iustin Pop
      "FORCE": self.op.force,
2308 a8083063 Iustin Pop
      }
2309 396e1b78 Michael Hanselmann
    env.update(_BuildInstanceHookEnvByObject(self.instance))
2310 880478f8 Iustin Pop
    nl = ([self.sstore.GetMasterNode(), self.instance.primary_node] +
2311 a8083063 Iustin Pop
          list(self.instance.secondary_nodes))
2312 a8083063 Iustin Pop
    return env, nl, nl
2313 a8083063 Iustin Pop
2314 a8083063 Iustin Pop
  def CheckPrereq(self):
2315 a8083063 Iustin Pop
    """Check prerequisites.
2316 a8083063 Iustin Pop

2317 a8083063 Iustin Pop
    This checks that the instance is in the cluster.
2318 a8083063 Iustin Pop

2319 a8083063 Iustin Pop
    """
2320 a8083063 Iustin Pop
    instance = self.cfg.GetInstanceInfo(
2321 a8083063 Iustin Pop
      self.cfg.ExpandInstanceName(self.op.instance_name))
2322 a8083063 Iustin Pop
    if instance is None:
2323 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("Instance '%s' not known" %
2324 3ecf6786 Iustin Pop
                                 self.op.instance_name)
2325 a8083063 Iustin Pop
2326 a8083063 Iustin Pop
    # check bridges existence
2327 bf6929a2 Alexander Schreiber
    _CheckInstanceBridgesExist(instance)
2328 a8083063 Iustin Pop
2329 d4f16fd9 Iustin Pop
    _CheckNodeFreeMemory(self.cfg, instance.primary_node,
2330 d4f16fd9 Iustin Pop
                         "starting instance %s" % instance.name,
2331 d4f16fd9 Iustin Pop
                         instance.memory)
2332 d4f16fd9 Iustin Pop
2333 a8083063 Iustin Pop
    self.instance = instance
2334 a8083063 Iustin Pop
    self.op.instance_name = instance.name
2335 a8083063 Iustin Pop
2336 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
2337 a8083063 Iustin Pop
    """Start the instance.
2338 a8083063 Iustin Pop

2339 a8083063 Iustin Pop
    """
2340 a8083063 Iustin Pop
    instance = self.instance
2341 a8083063 Iustin Pop
    force = self.op.force
2342 a8083063 Iustin Pop
    extra_args = getattr(self.op, "extra_args", "")
2343 a8083063 Iustin Pop
2344 fe482621 Iustin Pop
    self.cfg.MarkInstanceUp(instance.name)
2345 fe482621 Iustin Pop
2346 a8083063 Iustin Pop
    node_current = instance.primary_node
2347 a8083063 Iustin Pop
2348 fe7b0351 Michael Hanselmann
    _StartInstanceDisks(self.cfg, instance, force)
2349 a8083063 Iustin Pop
2350 a8083063 Iustin Pop
    if not rpc.call_instance_start(node_current, instance, extra_args):
2351 a8083063 Iustin Pop
      _ShutdownInstanceDisks(instance, self.cfg)
2352 3ecf6786 Iustin Pop
      raise errors.OpExecError("Could not start instance")
2353 a8083063 Iustin Pop
2354 a8083063 Iustin Pop
2355 bf6929a2 Alexander Schreiber
class LURebootInstance(LogicalUnit):
2356 bf6929a2 Alexander Schreiber
  """Reboot an instance.
2357 bf6929a2 Alexander Schreiber

2358 bf6929a2 Alexander Schreiber
  """
2359 bf6929a2 Alexander Schreiber
  HPATH = "instance-reboot"
2360 bf6929a2 Alexander Schreiber
  HTYPE = constants.HTYPE_INSTANCE
2361 bf6929a2 Alexander Schreiber
  _OP_REQP = ["instance_name", "ignore_secondaries", "reboot_type"]
2362 bf6929a2 Alexander Schreiber
2363 bf6929a2 Alexander Schreiber
  def BuildHooksEnv(self):
2364 bf6929a2 Alexander Schreiber
    """Build hooks env.
2365 bf6929a2 Alexander Schreiber

2366 bf6929a2 Alexander Schreiber
    This runs on master, primary and secondary nodes of the instance.
2367 bf6929a2 Alexander Schreiber

2368 bf6929a2 Alexander Schreiber
    """
2369 bf6929a2 Alexander Schreiber
    env = {
2370 bf6929a2 Alexander Schreiber
      "IGNORE_SECONDARIES": self.op.ignore_secondaries,
2371 bf6929a2 Alexander Schreiber
      }
2372 bf6929a2 Alexander Schreiber
    env.update(_BuildInstanceHookEnvByObject(self.instance))
2373 bf6929a2 Alexander Schreiber
    nl = ([self.sstore.GetMasterNode(), self.instance.primary_node] +
2374 bf6929a2 Alexander Schreiber
          list(self.instance.secondary_nodes))
2375 bf6929a2 Alexander Schreiber
    return env, nl, nl
2376 bf6929a2 Alexander Schreiber
2377 bf6929a2 Alexander Schreiber
  def CheckPrereq(self):
2378 bf6929a2 Alexander Schreiber
    """Check prerequisites.
2379 bf6929a2 Alexander Schreiber

2380 bf6929a2 Alexander Schreiber
    This checks that the instance is in the cluster.
2381 bf6929a2 Alexander Schreiber

2382 bf6929a2 Alexander Schreiber
    """
2383 bf6929a2 Alexander Schreiber
    instance = self.cfg.GetInstanceInfo(
2384 bf6929a2 Alexander Schreiber
      self.cfg.ExpandInstanceName(self.op.instance_name))
2385 bf6929a2 Alexander Schreiber
    if instance is None:
2386 bf6929a2 Alexander Schreiber
      raise errors.OpPrereqError("Instance '%s' not known" %
2387 bf6929a2 Alexander Schreiber
                                 self.op.instance_name)
2388 bf6929a2 Alexander Schreiber
2389 bf6929a2 Alexander Schreiber
    # check bridges existence
2390 bf6929a2 Alexander Schreiber
    _CheckInstanceBridgesExist(instance)
2391 bf6929a2 Alexander Schreiber
2392 bf6929a2 Alexander Schreiber
    self.instance = instance
2393 bf6929a2 Alexander Schreiber
    self.op.instance_name = instance.name
2394 bf6929a2 Alexander Schreiber
2395 bf6929a2 Alexander Schreiber
  def Exec(self, feedback_fn):
2396 bf6929a2 Alexander Schreiber
    """Reboot the instance.
2397 bf6929a2 Alexander Schreiber

2398 bf6929a2 Alexander Schreiber
    """
2399 bf6929a2 Alexander Schreiber
    instance = self.instance
2400 bf6929a2 Alexander Schreiber
    ignore_secondaries = self.op.ignore_secondaries
2401 bf6929a2 Alexander Schreiber
    reboot_type = self.op.reboot_type
2402 bf6929a2 Alexander Schreiber
    extra_args = getattr(self.op, "extra_args", "")
2403 bf6929a2 Alexander Schreiber
2404 bf6929a2 Alexander Schreiber
    node_current = instance.primary_node
2405 bf6929a2 Alexander Schreiber
2406 bf6929a2 Alexander Schreiber
    if reboot_type not in [constants.INSTANCE_REBOOT_SOFT,
2407 bf6929a2 Alexander Schreiber
                           constants.INSTANCE_REBOOT_HARD,
2408 bf6929a2 Alexander Schreiber
                           constants.INSTANCE_REBOOT_FULL]:
2409 bf6929a2 Alexander Schreiber
      raise errors.ParameterError("reboot type not in [%s, %s, %s]" %
2410 bf6929a2 Alexander Schreiber
                                  (constants.INSTANCE_REBOOT_SOFT,
2411 bf6929a2 Alexander Schreiber
                                   constants.INSTANCE_REBOOT_HARD,
2412 bf6929a2 Alexander Schreiber
                                   constants.INSTANCE_REBOOT_FULL))
2413 bf6929a2 Alexander Schreiber
2414 bf6929a2 Alexander Schreiber
    if reboot_type in [constants.INSTANCE_REBOOT_SOFT,
2415 bf6929a2 Alexander Schreiber
                       constants.INSTANCE_REBOOT_HARD]:
2416 bf6929a2 Alexander Schreiber
      if not rpc.call_instance_reboot(node_current, instance,
2417 bf6929a2 Alexander Schreiber
                                      reboot_type, extra_args):
2418 bf6929a2 Alexander Schreiber
        raise errors.OpExecError("Could not reboot instance")
2419 bf6929a2 Alexander Schreiber
    else:
2420 bf6929a2 Alexander Schreiber
      if not rpc.call_instance_shutdown(node_current, instance):
2421 bf6929a2 Alexander Schreiber
        raise errors.OpExecError("could not shutdown instance for full reboot")
2422 bf6929a2 Alexander Schreiber
      _ShutdownInstanceDisks(instance, self.cfg)
2423 bf6929a2 Alexander Schreiber
      _StartInstanceDisks(self.cfg, instance, ignore_secondaries)
2424 bf6929a2 Alexander Schreiber
      if not rpc.call_instance_start(node_current, instance, extra_args):
2425 bf6929a2 Alexander Schreiber
        _ShutdownInstanceDisks(instance, self.cfg)
2426 bf6929a2 Alexander Schreiber
        raise errors.OpExecError("Could not start instance for full reboot")
2427 bf6929a2 Alexander Schreiber
2428 bf6929a2 Alexander Schreiber
    self.cfg.MarkInstanceUp(instance.name)
2429 bf6929a2 Alexander Schreiber
2430 bf6929a2 Alexander Schreiber
2431 a8083063 Iustin Pop
class LUShutdownInstance(LogicalUnit):
2432 a8083063 Iustin Pop
  """Shutdown an instance.
2433 a8083063 Iustin Pop

2434 a8083063 Iustin Pop
  """
2435 a8083063 Iustin Pop
  HPATH = "instance-stop"
2436 a8083063 Iustin Pop
  HTYPE = constants.HTYPE_INSTANCE
2437 a8083063 Iustin Pop
  _OP_REQP = ["instance_name"]
2438 a8083063 Iustin Pop
2439 a8083063 Iustin Pop
  def BuildHooksEnv(self):
2440 a8083063 Iustin Pop
    """Build hooks env.
2441 a8083063 Iustin Pop

2442 a8083063 Iustin Pop
    This runs on master, primary and secondary nodes of the instance.
2443 a8083063 Iustin Pop

2444 a8083063 Iustin Pop
    """
2445 396e1b78 Michael Hanselmann
    env = _BuildInstanceHookEnvByObject(self.instance)
2446 880478f8 Iustin Pop
    nl = ([self.sstore.GetMasterNode(), self.instance.primary_node] +
2447 a8083063 Iustin Pop
          list(self.instance.secondary_nodes))
2448 a8083063 Iustin Pop
    return env, nl, nl
2449 a8083063 Iustin Pop
2450 a8083063 Iustin Pop
  def CheckPrereq(self):
2451 a8083063 Iustin Pop
    """Check prerequisites.
2452 a8083063 Iustin Pop

2453 a8083063 Iustin Pop
    This checks that the instance is in the cluster.
2454 a8083063 Iustin Pop

2455 a8083063 Iustin Pop
    """
2456 a8083063 Iustin Pop
    instance = self.cfg.GetInstanceInfo(
2457 a8083063 Iustin Pop
      self.cfg.ExpandInstanceName(self.op.instance_name))
2458 a8083063 Iustin Pop
    if instance is None:
2459 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("Instance '%s' not known" %
2460 3ecf6786 Iustin Pop
                                 self.op.instance_name)
2461 a8083063 Iustin Pop
    self.instance = instance
2462 a8083063 Iustin Pop
2463 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
2464 a8083063 Iustin Pop
    """Shutdown the instance.
2465 a8083063 Iustin Pop

2466 a8083063 Iustin Pop
    """
2467 a8083063 Iustin Pop
    instance = self.instance
2468 a8083063 Iustin Pop
    node_current = instance.primary_node
2469 fe482621 Iustin Pop
    self.cfg.MarkInstanceDown(instance.name)
2470 a8083063 Iustin Pop
    if not rpc.call_instance_shutdown(node_current, instance):
2471 a8083063 Iustin Pop
      logger.Error("could not shutdown instance")
2472 a8083063 Iustin Pop
2473 a8083063 Iustin Pop
    _ShutdownInstanceDisks(instance, self.cfg)
2474 a8083063 Iustin Pop
2475 a8083063 Iustin Pop
2476 fe7b0351 Michael Hanselmann
class LUReinstallInstance(LogicalUnit):
2477 fe7b0351 Michael Hanselmann
  """Reinstall an instance.
2478 fe7b0351 Michael Hanselmann

2479 fe7b0351 Michael Hanselmann
  """
2480 fe7b0351 Michael Hanselmann
  HPATH = "instance-reinstall"
2481 fe7b0351 Michael Hanselmann
  HTYPE = constants.HTYPE_INSTANCE
2482 fe7b0351 Michael Hanselmann
  _OP_REQP = ["instance_name"]
2483 fe7b0351 Michael Hanselmann
2484 fe7b0351 Michael Hanselmann
  def BuildHooksEnv(self):
2485 fe7b0351 Michael Hanselmann
    """Build hooks env.
2486 fe7b0351 Michael Hanselmann

2487 fe7b0351 Michael Hanselmann
    This runs on master, primary and secondary nodes of the instance.
2488 fe7b0351 Michael Hanselmann

2489 fe7b0351 Michael Hanselmann
    """
2490 396e1b78 Michael Hanselmann
    env = _BuildInstanceHookEnvByObject(self.instance)
2491 fe7b0351 Michael Hanselmann
    nl = ([self.sstore.GetMasterNode(), self.instance.primary_node] +
2492 fe7b0351 Michael Hanselmann
          list(self.instance.secondary_nodes))
2493 fe7b0351 Michael Hanselmann
    return env, nl, nl
2494 fe7b0351 Michael Hanselmann
2495 fe7b0351 Michael Hanselmann
  def CheckPrereq(self):
2496 fe7b0351 Michael Hanselmann
    """Check prerequisites.
2497 fe7b0351 Michael Hanselmann

2498 fe7b0351 Michael Hanselmann
    This checks that the instance is in the cluster and is not running.
2499 fe7b0351 Michael Hanselmann

2500 fe7b0351 Michael Hanselmann
    """
2501 fe7b0351 Michael Hanselmann
    instance = self.cfg.GetInstanceInfo(
2502 fe7b0351 Michael Hanselmann
      self.cfg.ExpandInstanceName(self.op.instance_name))
2503 fe7b0351 Michael Hanselmann
    if instance is None:
2504 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("Instance '%s' not known" %
2505 3ecf6786 Iustin Pop
                                 self.op.instance_name)
2506 fe7b0351 Michael Hanselmann
    if instance.disk_template == constants.DT_DISKLESS:
2507 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("Instance '%s' has no disks" %
2508 3ecf6786 Iustin Pop
                                 self.op.instance_name)
2509 fe7b0351 Michael Hanselmann
    if instance.status != "down":
2510 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("Instance '%s' is marked to be up" %
2511 3ecf6786 Iustin Pop
                                 self.op.instance_name)
2512 fe7b0351 Michael Hanselmann
    remote_info = rpc.call_instance_info(instance.primary_node, instance.name)
2513 fe7b0351 Michael Hanselmann
    if remote_info:
2514 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("Instance '%s' is running on the node %s" %
2515 3ecf6786 Iustin Pop
                                 (self.op.instance_name,
2516 3ecf6786 Iustin Pop
                                  instance.primary_node))
2517 d0834de3 Michael Hanselmann
2518 d0834de3 Michael Hanselmann
    self.op.os_type = getattr(self.op, "os_type", None)
2519 d0834de3 Michael Hanselmann
    if self.op.os_type is not None:
2520 d0834de3 Michael Hanselmann
      # OS verification
2521 d0834de3 Michael Hanselmann
      pnode = self.cfg.GetNodeInfo(
2522 d0834de3 Michael Hanselmann
        self.cfg.ExpandNodeName(instance.primary_node))
2523 d0834de3 Michael Hanselmann
      if pnode is None:
2524 3ecf6786 Iustin Pop
        raise errors.OpPrereqError("Primary node '%s' is unknown" %
2525 3ecf6786 Iustin Pop
                                   self.op.pnode)
2526 00fe9e38 Guido Trotter
      os_obj = rpc.call_os_get(pnode.name, self.op.os_type)
2527 dfa96ded Guido Trotter
      if not os_obj:
2528 3ecf6786 Iustin Pop
        raise errors.OpPrereqError("OS '%s' not in supported OS list for"
2529 3ecf6786 Iustin Pop
                                   " primary node"  % self.op.os_type)
2530 d0834de3 Michael Hanselmann
2531 fe7b0351 Michael Hanselmann
    self.instance = instance
2532 fe7b0351 Michael Hanselmann
2533 fe7b0351 Michael Hanselmann
  def Exec(self, feedback_fn):
2534 fe7b0351 Michael Hanselmann
    """Reinstall the instance.
2535 fe7b0351 Michael Hanselmann

2536 fe7b0351 Michael Hanselmann
    """
2537 fe7b0351 Michael Hanselmann
    inst = self.instance
2538 fe7b0351 Michael Hanselmann
2539 d0834de3 Michael Hanselmann
    if self.op.os_type is not None:
2540 d0834de3 Michael Hanselmann
      feedback_fn("Changing OS to '%s'..." % self.op.os_type)
2541 d0834de3 Michael Hanselmann
      inst.os = self.op.os_type
2542 d0834de3 Michael Hanselmann
      self.cfg.AddInstance(inst)
2543 d0834de3 Michael Hanselmann
2544 fe7b0351 Michael Hanselmann
    _StartInstanceDisks(self.cfg, inst, None)
2545 fe7b0351 Michael Hanselmann
    try:
2546 fe7b0351 Michael Hanselmann
      feedback_fn("Running the instance OS create scripts...")
2547 fe7b0351 Michael Hanselmann
      if not rpc.call_instance_os_add(inst.primary_node, inst, "sda", "sdb"):
2548 f4bc1f2c Michael Hanselmann
        raise errors.OpExecError("Could not install OS for instance %s"
2549 f4bc1f2c Michael Hanselmann
                                 " on node %s" %
2550 3ecf6786 Iustin Pop
                                 (inst.name, inst.primary_node))
2551 fe7b0351 Michael Hanselmann
    finally:
2552 fe7b0351 Michael Hanselmann
      _ShutdownInstanceDisks(inst, self.cfg)
2553 fe7b0351 Michael Hanselmann
2554 fe7b0351 Michael Hanselmann
2555 decd5f45 Iustin Pop
class LURenameInstance(LogicalUnit):
2556 decd5f45 Iustin Pop
  """Rename an instance.
2557 decd5f45 Iustin Pop

2558 decd5f45 Iustin Pop
  """
2559 decd5f45 Iustin Pop
  HPATH = "instance-rename"
2560 decd5f45 Iustin Pop
  HTYPE = constants.HTYPE_INSTANCE
2561 decd5f45 Iustin Pop
  _OP_REQP = ["instance_name", "new_name"]
2562 decd5f45 Iustin Pop
2563 decd5f45 Iustin Pop
  def BuildHooksEnv(self):
2564 decd5f45 Iustin Pop
    """Build hooks env.
2565 decd5f45 Iustin Pop

2566 decd5f45 Iustin Pop
    This runs on master, primary and secondary nodes of the instance.
2567 decd5f45 Iustin Pop

2568 decd5f45 Iustin Pop
    """
2569 decd5f45 Iustin Pop
    env = _BuildInstanceHookEnvByObject(self.instance)
2570 decd5f45 Iustin Pop
    env["INSTANCE_NEW_NAME"] = self.op.new_name
2571 decd5f45 Iustin Pop
    nl = ([self.sstore.GetMasterNode(), self.instance.primary_node] +
2572 decd5f45 Iustin Pop
          list(self.instance.secondary_nodes))
2573 decd5f45 Iustin Pop
    return env, nl, nl
2574 decd5f45 Iustin Pop
2575 decd5f45 Iustin Pop
  def CheckPrereq(self):
2576 decd5f45 Iustin Pop
    """Check prerequisites.
2577 decd5f45 Iustin Pop

2578 decd5f45 Iustin Pop
    This checks that the instance is in the cluster and is not running.
2579 decd5f45 Iustin Pop

2580 decd5f45 Iustin Pop
    """
2581 decd5f45 Iustin Pop
    instance = self.cfg.GetInstanceInfo(
2582 decd5f45 Iustin Pop
      self.cfg.ExpandInstanceName(self.op.instance_name))
2583 decd5f45 Iustin Pop
    if instance is None:
2584 decd5f45 Iustin Pop
      raise errors.OpPrereqError("Instance '%s' not known" %
2585 decd5f45 Iustin Pop
                                 self.op.instance_name)
2586 decd5f45 Iustin Pop
    if instance.status != "down":
2587 decd5f45 Iustin Pop
      raise errors.OpPrereqError("Instance '%s' is marked to be up" %
2588 decd5f45 Iustin Pop
                                 self.op.instance_name)
2589 decd5f45 Iustin Pop
    remote_info = rpc.call_instance_info(instance.primary_node, instance.name)
2590 decd5f45 Iustin Pop
    if remote_info:
2591 decd5f45 Iustin Pop
      raise errors.OpPrereqError("Instance '%s' is running on the node %s" %
2592 decd5f45 Iustin Pop
                                 (self.op.instance_name,
2593 decd5f45 Iustin Pop
                                  instance.primary_node))
2594 decd5f45 Iustin Pop
    self.instance = instance
2595 decd5f45 Iustin Pop
2596 decd5f45 Iustin Pop
    # new name verification
2597 89e1fc26 Iustin Pop
    name_info = utils.HostInfo(self.op.new_name)
2598 decd5f45 Iustin Pop
2599 89e1fc26 Iustin Pop
    self.op.new_name = new_name = name_info.name
2600 7bde3275 Guido Trotter
    instance_list = self.cfg.GetInstanceList()
2601 7bde3275 Guido Trotter
    if new_name in instance_list:
2602 7bde3275 Guido Trotter
      raise errors.OpPrereqError("Instance '%s' is already in the cluster" %
2603 c09f363f Manuel Franceschini
                                 new_name)
2604 7bde3275 Guido Trotter
2605 decd5f45 Iustin Pop
    if not getattr(self.op, "ignore_ip", False):
2606 89e1fc26 Iustin Pop
      command = ["fping", "-q", name_info.ip]
2607 decd5f45 Iustin Pop
      result = utils.RunCmd(command)
2608 decd5f45 Iustin Pop
      if not result.failed:
2609 decd5f45 Iustin Pop
        raise errors.OpPrereqError("IP %s of instance %s already in use" %
2610 89e1fc26 Iustin Pop
                                   (name_info.ip, new_name))
2611 decd5f45 Iustin Pop
2612 decd5f45 Iustin Pop
2613 decd5f45 Iustin Pop
  def Exec(self, feedback_fn):
2614 decd5f45 Iustin Pop
    """Reinstall the instance.
2615 decd5f45 Iustin Pop

2616 decd5f45 Iustin Pop
    """
2617 decd5f45 Iustin Pop
    inst = self.instance
2618 decd5f45 Iustin Pop
    old_name = inst.name
2619 decd5f45 Iustin Pop
2620 b23c4333 Manuel Franceschini
    if inst.disk_template == constants.DT_FILE:
2621 b23c4333 Manuel Franceschini
      old_file_storage_dir = os.path.dirname(inst.disks[0].logical_id[1])
2622 b23c4333 Manuel Franceschini
2623 decd5f45 Iustin Pop
    self.cfg.RenameInstance(inst.name, self.op.new_name)
2624 decd5f45 Iustin Pop
2625 decd5f45 Iustin Pop
    # re-read the instance from the configuration after rename
2626 decd5f45 Iustin Pop
    inst = self.cfg.GetInstanceInfo(self.op.new_name)
2627 decd5f45 Iustin Pop
2628 b23c4333 Manuel Franceschini
    if inst.disk_template == constants.DT_FILE:
2629 b23c4333 Manuel Franceschini
      new_file_storage_dir = os.path.dirname(inst.disks[0].logical_id[1])
2630 b23c4333 Manuel Franceschini
      result = rpc.call_file_storage_dir_rename(inst.primary_node,
2631 b23c4333 Manuel Franceschini
                                                old_file_storage_dir,
2632 b23c4333 Manuel Franceschini
                                                new_file_storage_dir)
2633 b23c4333 Manuel Franceschini
2634 b23c4333 Manuel Franceschini
      if not result:
2635 b23c4333 Manuel Franceschini
        raise errors.OpExecError("Could not connect to node '%s' to rename"
2636 b23c4333 Manuel Franceschini
                                 " directory '%s' to '%s' (but the instance"
2637 b23c4333 Manuel Franceschini
                                 " has been renamed in Ganeti)" % (
2638 b23c4333 Manuel Franceschini
                                 inst.primary_node, old_file_storage_dir,
2639 b23c4333 Manuel Franceschini
                                 new_file_storage_dir))
2640 b23c4333 Manuel Franceschini
2641 b23c4333 Manuel Franceschini
      if not result[0]:
2642 b23c4333 Manuel Franceschini
        raise errors.OpExecError("Could not rename directory '%s' to '%s'"
2643 b23c4333 Manuel Franceschini
                                 " (but the instance has been renamed in"
2644 b23c4333 Manuel Franceschini
                                 " Ganeti)" % (old_file_storage_dir,
2645 b23c4333 Manuel Franceschini
                                               new_file_storage_dir))
2646 b23c4333 Manuel Franceschini
2647 decd5f45 Iustin Pop
    _StartInstanceDisks(self.cfg, inst, None)
2648 decd5f45 Iustin Pop
    try:
2649 decd5f45 Iustin Pop
      if not rpc.call_instance_run_rename(inst.primary_node, inst, old_name,
2650 decd5f45 Iustin Pop
                                          "sda", "sdb"):
2651 f4bc1f2c Michael Hanselmann
        msg = ("Could run OS rename script for instance %s on node %s (but the"
2652 f4bc1f2c Michael Hanselmann
               " instance has been renamed in Ganeti)" %
2653 decd5f45 Iustin Pop
               (inst.name, inst.primary_node))
2654 decd5f45 Iustin Pop
        logger.Error(msg)
2655 decd5f45 Iustin Pop
    finally:
2656 decd5f45 Iustin Pop
      _ShutdownInstanceDisks(inst, self.cfg)
2657 decd5f45 Iustin Pop
2658 decd5f45 Iustin Pop
2659 a8083063 Iustin Pop
class LURemoveInstance(LogicalUnit):
2660 a8083063 Iustin Pop
  """Remove an instance.
2661 a8083063 Iustin Pop

2662 a8083063 Iustin Pop
  """
2663 a8083063 Iustin Pop
  HPATH = "instance-remove"
2664 a8083063 Iustin Pop
  HTYPE = constants.HTYPE_INSTANCE
2665 5c54b832 Iustin Pop
  _OP_REQP = ["instance_name", "ignore_failures"]
2666 a8083063 Iustin Pop
2667 a8083063 Iustin Pop
  def BuildHooksEnv(self):
2668 a8083063 Iustin Pop
    """Build hooks env.
2669 a8083063 Iustin Pop

2670 a8083063 Iustin Pop
    This runs on master, primary and secondary nodes of the instance.
2671 a8083063 Iustin Pop

2672 a8083063 Iustin Pop
    """
2673 396e1b78 Michael Hanselmann
    env = _BuildInstanceHookEnvByObject(self.instance)
2674 1d67656e Iustin Pop
    nl = [self.sstore.GetMasterNode()]
2675 a8083063 Iustin Pop
    return env, nl, nl
2676 a8083063 Iustin Pop
2677 a8083063 Iustin Pop
  def CheckPrereq(self):
2678 a8083063 Iustin Pop
    """Check prerequisites.
2679 a8083063 Iustin Pop

2680 a8083063 Iustin Pop
    This checks that the instance is in the cluster.
2681 a8083063 Iustin Pop

2682 a8083063 Iustin Pop
    """
2683 a8083063 Iustin Pop
    instance = self.cfg.GetInstanceInfo(
2684 a8083063 Iustin Pop
      self.cfg.ExpandInstanceName(self.op.instance_name))
2685 a8083063 Iustin Pop
    if instance is None:
2686 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("Instance '%s' not known" %
2687 3ecf6786 Iustin Pop
                                 self.op.instance_name)
2688 a8083063 Iustin Pop
    self.instance = instance
2689 a8083063 Iustin Pop
2690 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
2691 a8083063 Iustin Pop
    """Remove the instance.
2692 a8083063 Iustin Pop

2693 a8083063 Iustin Pop
    """
2694 a8083063 Iustin Pop
    instance = self.instance
2695 a8083063 Iustin Pop
    logger.Info("shutting down instance %s on node %s" %
2696 a8083063 Iustin Pop
                (instance.name, instance.primary_node))
2697 a8083063 Iustin Pop
2698 a8083063 Iustin Pop
    if not rpc.call_instance_shutdown(instance.primary_node, instance):
2699 1d67656e Iustin Pop
      if self.op.ignore_failures:
2700 1d67656e Iustin Pop
        feedback_fn("Warning: can't shutdown instance")
2701 1d67656e Iustin Pop
      else:
2702 1d67656e Iustin Pop
        raise errors.OpExecError("Could not shutdown instance %s on node %s" %
2703 1d67656e Iustin Pop
                                 (instance.name, instance.primary_node))
2704 a8083063 Iustin Pop
2705 a8083063 Iustin Pop
    logger.Info("removing block devices for instance %s" % instance.name)
2706 a8083063 Iustin Pop
2707 1d67656e Iustin Pop
    if not _RemoveDisks(instance, self.cfg):
2708 1d67656e Iustin Pop
      if self.op.ignore_failures:
2709 1d67656e Iustin Pop
        feedback_fn("Warning: can't remove instance's disks")
2710 1d67656e Iustin Pop
      else:
2711 1d67656e Iustin Pop
        raise errors.OpExecError("Can't remove instance's disks")
2712 a8083063 Iustin Pop
2713 a8083063 Iustin Pop
    logger.Info("removing instance %s out of cluster config" % instance.name)
2714 a8083063 Iustin Pop
2715 a8083063 Iustin Pop
    self.cfg.RemoveInstance(instance.name)
2716 a8083063 Iustin Pop
2717 a8083063 Iustin Pop
2718 a8083063 Iustin Pop
class LUQueryInstances(NoHooksLU):
2719 a8083063 Iustin Pop
  """Logical unit for querying instances.
2720 a8083063 Iustin Pop

2721 a8083063 Iustin Pop
  """
2722 069dcc86 Iustin Pop
  _OP_REQP = ["output_fields", "names"]
2723 a8083063 Iustin Pop
2724 a8083063 Iustin Pop
  def CheckPrereq(self):
2725 a8083063 Iustin Pop
    """Check prerequisites.
2726 a8083063 Iustin Pop

2727 a8083063 Iustin Pop
    This checks that the fields required are valid output fields.
2728 a8083063 Iustin Pop

2729 a8083063 Iustin Pop
    """
2730 d8052456 Iustin Pop
    self.dynamic_fields = frozenset(["oper_state", "oper_ram", "status"])
2731 dcb93971 Michael Hanselmann
    _CheckOutputFields(static=["name", "os", "pnode", "snodes",
2732 dcb93971 Michael Hanselmann
                               "admin_state", "admin_ram",
2733 644eeef9 Iustin Pop
                               "disk_template", "ip", "mac", "bridge",
2734 d6d415e8 Iustin Pop
                               "sda_size", "sdb_size", "vcpus"],
2735 dcb93971 Michael Hanselmann
                       dynamic=self.dynamic_fields,
2736 dcb93971 Michael Hanselmann
                       selected=self.op.output_fields)
2737 a8083063 Iustin Pop
2738 069dcc86 Iustin Pop
    self.wanted = _GetWantedInstances(self, self.op.names)
2739 069dcc86 Iustin Pop
2740 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
2741 a8083063 Iustin Pop
    """Computes the list of nodes and their attributes.
2742 a8083063 Iustin Pop

2743 a8083063 Iustin Pop
    """
2744 069dcc86 Iustin Pop
    instance_names = self.wanted
2745 a8083063 Iustin Pop
    instance_list = [self.cfg.GetInstanceInfo(iname) for iname
2746 a8083063 Iustin Pop
                     in instance_names]
2747 a8083063 Iustin Pop
2748 a8083063 Iustin Pop
    # begin data gathering
2749 a8083063 Iustin Pop
2750 a8083063 Iustin Pop
    nodes = frozenset([inst.primary_node for inst in instance_list])
2751 a8083063 Iustin Pop
2752 a8083063 Iustin Pop
    bad_nodes = []
2753 a8083063 Iustin Pop
    if self.dynamic_fields.intersection(self.op.output_fields):
2754 a8083063 Iustin Pop
      live_data = {}
2755 a8083063 Iustin Pop
      node_data = rpc.call_all_instances_info(nodes)
2756 a8083063 Iustin Pop
      for name in nodes:
2757 a8083063 Iustin Pop
        result = node_data[name]
2758 a8083063 Iustin Pop
        if result:
2759 a8083063 Iustin Pop
          live_data.update(result)
2760 a8083063 Iustin Pop
        elif result == False:
2761 a8083063 Iustin Pop
          bad_nodes.append(name)
2762 a8083063 Iustin Pop
        # else no instance is alive
2763 a8083063 Iustin Pop
    else:
2764 a8083063 Iustin Pop
      live_data = dict([(name, {}) for name in instance_names])
2765 a8083063 Iustin Pop
2766 a8083063 Iustin Pop
    # end data gathering
2767 a8083063 Iustin Pop
2768 a8083063 Iustin Pop
    output = []
2769 a8083063 Iustin Pop
    for instance in instance_list:
2770 a8083063 Iustin Pop
      iout = []
2771 a8083063 Iustin Pop
      for field in self.op.output_fields:
2772 a8083063 Iustin Pop
        if field == "name":
2773 a8083063 Iustin Pop
          val = instance.name
2774 a8083063 Iustin Pop
        elif field == "os":
2775 a8083063 Iustin Pop
          val = instance.os
2776 a8083063 Iustin Pop
        elif field == "pnode":
2777 a8083063 Iustin Pop
          val = instance.primary_node
2778 a8083063 Iustin Pop
        elif field == "snodes":
2779 8a23d2d3 Iustin Pop
          val = list(instance.secondary_nodes)
2780 a8083063 Iustin Pop
        elif field == "admin_state":
2781 8a23d2d3 Iustin Pop
          val = (instance.status != "down")
2782 a8083063 Iustin Pop
        elif field == "oper_state":
2783 a8083063 Iustin Pop
          if instance.primary_node in bad_nodes:
2784 8a23d2d3 Iustin Pop
            val = None
2785 a8083063 Iustin Pop
          else:
2786 8a23d2d3 Iustin Pop
            val = bool(live_data.get(instance.name))
2787 d8052456 Iustin Pop
        elif field == "status":
2788 d8052456 Iustin Pop
          if instance.primary_node in bad_nodes:
2789 d8052456 Iustin Pop
            val = "ERROR_nodedown"
2790 d8052456 Iustin Pop
          else:
2791 d8052456 Iustin Pop
            running = bool(live_data.get(instance.name))
2792 d8052456 Iustin Pop
            if running:
2793 d8052456 Iustin Pop
              if instance.status != "down":
2794 d8052456 Iustin Pop
                val = "running"
2795 d8052456 Iustin Pop
              else:
2796 d8052456 Iustin Pop
                val = "ERROR_up"
2797 d8052456 Iustin Pop
            else:
2798 d8052456 Iustin Pop
              if instance.status != "down":
2799 d8052456 Iustin Pop
                val = "ERROR_down"
2800 d8052456 Iustin Pop
              else:
2801 d8052456 Iustin Pop
                val = "ADMIN_down"
2802 a8083063 Iustin Pop
        elif field == "admin_ram":
2803 a8083063 Iustin Pop
          val = instance.memory
2804 a8083063 Iustin Pop
        elif field == "oper_ram":
2805 a8083063 Iustin Pop
          if instance.primary_node in bad_nodes:
2806 8a23d2d3 Iustin Pop
            val = None
2807 a8083063 Iustin Pop
          elif instance.name in live_data:
2808 a8083063 Iustin Pop
            val = live_data[instance.name].get("memory", "?")
2809 a8083063 Iustin Pop
          else:
2810 a8083063 Iustin Pop
            val = "-"
2811 a8083063 Iustin Pop
        elif field == "disk_template":
2812 a8083063 Iustin Pop
          val = instance.disk_template
2813 a8083063 Iustin Pop
        elif field == "ip":
2814 a8083063 Iustin Pop
          val = instance.nics[0].ip
2815 a8083063 Iustin Pop
        elif field == "bridge":
2816 a8083063 Iustin Pop
          val = instance.nics[0].bridge
2817 a8083063 Iustin Pop
        elif field == "mac":
2818 a8083063 Iustin Pop
          val = instance.nics[0].mac
2819 644eeef9 Iustin Pop
        elif field == "sda_size" or field == "sdb_size":
2820 644eeef9 Iustin Pop
          disk = instance.FindDisk(field[:3])
2821 644eeef9 Iustin Pop
          if disk is None:
2822 8a23d2d3 Iustin Pop
            val = None
2823 644eeef9 Iustin Pop
          else:
2824 644eeef9 Iustin Pop
            val = disk.size
2825 d6d415e8 Iustin Pop
        elif field == "vcpus":
2826 d6d415e8 Iustin Pop
          val = instance.vcpus
2827 a8083063 Iustin Pop
        else:
2828 3ecf6786 Iustin Pop
          raise errors.ParameterError(field)
2829 a8083063 Iustin Pop
        iout.append(val)
2830 a8083063 Iustin Pop
      output.append(iout)
2831 a8083063 Iustin Pop
2832 a8083063 Iustin Pop
    return output
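# Summary of the "status" values computed above (comment added for clarity):
#
#   primary node unreachable            -> "ERROR_nodedown"
#   configured up,   observed running   -> "running"
#   configured up,   observed stopped   -> "ERROR_down"
#   configured down, observed running   -> "ERROR_up"
#   configured down, observed stopped   -> "ADMIN_down"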
2833 a8083063 Iustin Pop
2834 a8083063 Iustin Pop
2835 a8083063 Iustin Pop
class LUFailoverInstance(LogicalUnit):
2836 a8083063 Iustin Pop
  """Failover an instance.
2837 a8083063 Iustin Pop

2838 a8083063 Iustin Pop
  """
2839 a8083063 Iustin Pop
  HPATH = "instance-failover"
2840 a8083063 Iustin Pop
  HTYPE = constants.HTYPE_INSTANCE
2841 a8083063 Iustin Pop
  _OP_REQP = ["instance_name", "ignore_consistency"]
2842 a8083063 Iustin Pop
2843 a8083063 Iustin Pop
  def BuildHooksEnv(self):
2844 a8083063 Iustin Pop
    """Build hooks env.
2845 a8083063 Iustin Pop

2846 a8083063 Iustin Pop
    This runs on master, primary and secondary nodes of the instance.
2847 a8083063 Iustin Pop

2848 a8083063 Iustin Pop
    """
2849 a8083063 Iustin Pop
    env = {
2850 a8083063 Iustin Pop
      "IGNORE_CONSISTENCY": self.op.ignore_consistency,
2851 a8083063 Iustin Pop
      }
2852 396e1b78 Michael Hanselmann
    env.update(_BuildInstanceHookEnvByObject(self.instance))
2853 880478f8 Iustin Pop
    nl = [self.sstore.GetMasterNode()] + list(self.instance.secondary_nodes)
2854 a8083063 Iustin Pop
    return env, nl, nl
2855 a8083063 Iustin Pop
2856 a8083063 Iustin Pop
  def CheckPrereq(self):
2857 a8083063 Iustin Pop
    """Check prerequisites.
2858 a8083063 Iustin Pop

2859 a8083063 Iustin Pop
    This checks that the instance is in the cluster.
2860 a8083063 Iustin Pop

2861 a8083063 Iustin Pop
    """
2862 a8083063 Iustin Pop
    instance = self.cfg.GetInstanceInfo(
2863 a8083063 Iustin Pop
      self.cfg.ExpandInstanceName(self.op.instance_name))
2864 a8083063 Iustin Pop
    if instance is None:
2865 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("Instance '%s' not known" %
2866 3ecf6786 Iustin Pop
                                 self.op.instance_name)
2867 a8083063 Iustin Pop
2868 a1f445d3 Iustin Pop
    if instance.disk_template not in constants.DTS_NET_MIRROR:
2869 2a710df1 Michael Hanselmann
      raise errors.OpPrereqError("Instance's disk layout is not"
2870 a1f445d3 Iustin Pop
                                 " network mirrored, cannot failover.")
2871 2a710df1 Michael Hanselmann
2872 2a710df1 Michael Hanselmann
    secondary_nodes = instance.secondary_nodes
2873 2a710df1 Michael Hanselmann
    if not secondary_nodes:
2874 2a710df1 Michael Hanselmann
      raise errors.ProgrammerError("no secondary node but using "
2875 abdf0113 Iustin Pop
                                   "a mirrored disk template")
2876 2a710df1 Michael Hanselmann
2877 2a710df1 Michael Hanselmann
    target_node = secondary_nodes[0]
2878 d4f16fd9 Iustin Pop
    # check memory requirements on the secondary node
2879 d4f16fd9 Iustin Pop
    _CheckNodeFreeMemory(self.cfg, target_node, "failing over instance %s" %
2880 d4f16fd9 Iustin Pop
                         instance.name, instance.memory)
2881 3a7c308e Guido Trotter
2882 a8083063 Iustin Pop
    # check bridge existence
2883 a8083063 Iustin Pop
    brlist = [nic.bridge for nic in instance.nics]
2884 50ff9a7a Iustin Pop
    if not rpc.call_bridges_exist(target_node, brlist):
2885 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("One or more target bridges %s does not"
2886 3ecf6786 Iustin Pop
                                 " exist on destination node '%s'" %
2887 50ff9a7a Iustin Pop
                                 (brlist, target_node))
2888 a8083063 Iustin Pop
2889 a8083063 Iustin Pop
    self.instance = instance
2890 a8083063 Iustin Pop
2891 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
2892 a8083063 Iustin Pop
    """Failover an instance.
2893 a8083063 Iustin Pop

2894 a8083063 Iustin Pop
    The failover is done by shutting it down on its present node and
2895 a8083063 Iustin Pop
    starting it on the secondary.
2896 a8083063 Iustin Pop

2897 a8083063 Iustin Pop
    """
2898 a8083063 Iustin Pop
    instance = self.instance
2899 a8083063 Iustin Pop
2900 a8083063 Iustin Pop
    source_node = instance.primary_node
2901 a8083063 Iustin Pop
    target_node = instance.secondary_nodes[0]
2902 a8083063 Iustin Pop
2903 a8083063 Iustin Pop
    feedback_fn("* checking disk consistency between source and target")
2904 a8083063 Iustin Pop
    for dev in instance.disks:
2905 abdf0113 Iustin Pop
      # for drbd, these are drbd over lvm
2906 a8083063 Iustin Pop
      if not _CheckDiskConsistency(self.cfg, dev, target_node, False):
2907 a0aaa0d0 Guido Trotter
        if instance.status == "up" and not self.op.ignore_consistency:
2908 3ecf6786 Iustin Pop
          raise errors.OpExecError("Disk %s is degraded on target node,"
2909 3ecf6786 Iustin Pop
                                   " aborting failover." % dev.iv_name)
2910 a8083063 Iustin Pop
2911 a8083063 Iustin Pop
    feedback_fn("* shutting down instance on source node")
2912 a8083063 Iustin Pop
    logger.Info("Shutting down instance %s on node %s" %
2913 a8083063 Iustin Pop
                (instance.name, source_node))
2914 a8083063 Iustin Pop
2915 a8083063 Iustin Pop
    if not rpc.call_instance_shutdown(source_node, instance):
2916 24a40d57 Iustin Pop
      if self.op.ignore_consistency:
2917 24a40d57 Iustin Pop
        logger.Error("Could not shutdown instance %s on node %s. Proceeding"
2918 24a40d57 Iustin Pop
                     " anyway. Please make sure node %s is down"  %
2919 24a40d57 Iustin Pop
                     (instance.name, source_node, source_node))
2920 24a40d57 Iustin Pop
      else:
2921 24a40d57 Iustin Pop
        raise errors.OpExecError("Could not shutdown instance %s on node %s" %
2922 24a40d57 Iustin Pop
                                 (instance.name, source_node))
2923 a8083063 Iustin Pop
2924 a8083063 Iustin Pop
    feedback_fn("* deactivating the instance's disks on source node")
2925 a8083063 Iustin Pop
    if not _ShutdownInstanceDisks(instance, self.cfg, ignore_primary=True):
2926 3ecf6786 Iustin Pop
      raise errors.OpExecError("Can't shut down the instance's disks.")
2927 a8083063 Iustin Pop
2928 a8083063 Iustin Pop
    instance.primary_node = target_node
2929 a8083063 Iustin Pop
    # distribute new instance config to the other nodes
2930 a8083063 Iustin Pop
    self.cfg.AddInstance(instance)
2931 a8083063 Iustin Pop
2932 12a0cfbe Guido Trotter
    # Only start the instance if it's marked as up
2933 12a0cfbe Guido Trotter
    if instance.status == "up":
2934 12a0cfbe Guido Trotter
      feedback_fn("* activating the instance's disks on target node")
2935 12a0cfbe Guido Trotter
      logger.Info("Starting instance %s on node %s" %
2936 12a0cfbe Guido Trotter
                  (instance.name, target_node))
2937 12a0cfbe Guido Trotter
2938 12a0cfbe Guido Trotter
      disks_ok, dummy = _AssembleInstanceDisks(instance, self.cfg,
2939 12a0cfbe Guido Trotter
                                               ignore_secondaries=True)
2940 12a0cfbe Guido Trotter
      if not disks_ok:
2941 12a0cfbe Guido Trotter
        _ShutdownInstanceDisks(instance, self.cfg)
2942 12a0cfbe Guido Trotter
        raise errors.OpExecError("Can't activate the instance's disks")
2943 a8083063 Iustin Pop
2944 12a0cfbe Guido Trotter
      feedback_fn("* starting the instance on the target node")
2945 12a0cfbe Guido Trotter
      if not rpc.call_instance_start(target_node, instance, None):
2946 12a0cfbe Guido Trotter
        _ShutdownInstanceDisks(instance, self.cfg)
2947 12a0cfbe Guido Trotter
        raise errors.OpExecError("Could not start instance %s on node %s." %
2948 12a0cfbe Guido Trotter
                                 (instance.name, target_node))
2949 a8083063 Iustin Pop
2950 a8083063 Iustin Pop
2951 3f78eef2 Iustin Pop
def _CreateBlockDevOnPrimary(cfg, node, instance, device, info):
2952 a8083063 Iustin Pop
  """Create a tree of block devices on the primary node.
2953 a8083063 Iustin Pop

2954 a8083063 Iustin Pop
  This always creates all devices.
2955 a8083063 Iustin Pop

2956 a8083063 Iustin Pop
  """
2957 a8083063 Iustin Pop
  if device.children:
2958 a8083063 Iustin Pop
    for child in device.children:
2959 3f78eef2 Iustin Pop
      if not _CreateBlockDevOnPrimary(cfg, node, instance, child, info):
2960 a8083063 Iustin Pop
        return False
2961 a8083063 Iustin Pop
2962 a8083063 Iustin Pop
  cfg.SetDiskID(device, node)
2963 3f78eef2 Iustin Pop
  new_id = rpc.call_blockdev_create(node, device, device.size,
2964 3f78eef2 Iustin Pop
                                    instance.name, True, info)
2965 a8083063 Iustin Pop
  if not new_id:
2966 a8083063 Iustin Pop
    return False
2967 a8083063 Iustin Pop
  if device.physical_id is None:
2968 a8083063 Iustin Pop
    device.physical_id = new_id
2969 a8083063 Iustin Pop
  return True
2970 a8083063 Iustin Pop
2971 a8083063 Iustin Pop
2972 3f78eef2 Iustin Pop
def _CreateBlockDevOnSecondary(cfg, node, instance, device, force, info):
2973 a8083063 Iustin Pop
  """Create a tree of block devices on a secondary node.
2974 a8083063 Iustin Pop

2975 a8083063 Iustin Pop
  If this device type has to be created on secondaries, create it and
2976 a8083063 Iustin Pop
  all its children.
2977 a8083063 Iustin Pop

2978 a8083063 Iustin Pop
  If not, just recurse to children keeping the same 'force' value.
2979 a8083063 Iustin Pop

2980 a8083063 Iustin Pop
  """
2981 a8083063 Iustin Pop
  if device.CreateOnSecondary():
2982 a8083063 Iustin Pop
    force = True
2983 a8083063 Iustin Pop
  if device.children:
2984 a8083063 Iustin Pop
    for child in device.children:
2985 3f78eef2 Iustin Pop
      if not _CreateBlockDevOnSecondary(cfg, node, instance,
2986 3f78eef2 Iustin Pop
                                        child, force, info):
2987 a8083063 Iustin Pop
        return False
2988 a8083063 Iustin Pop
2989 a8083063 Iustin Pop
  if not force:
2990 a8083063 Iustin Pop
    return True
2991 a8083063 Iustin Pop
  cfg.SetDiskID(device, node)
2992 3f78eef2 Iustin Pop
  new_id = rpc.call_blockdev_create(node, device, device.size,
2993 3f78eef2 Iustin Pop
                                    instance.name, False, info)
2994 a8083063 Iustin Pop
  if not new_id:
2995 a8083063 Iustin Pop
    return False
2996 a8083063 Iustin Pop
  if device.physical_id is None:
2997 a8083063 Iustin Pop
    device.physical_id = new_id
2998 a8083063 Iustin Pop
  return True
2999 a8083063 Iustin Pop
3000 a8083063 Iustin Pop
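# Editor's note on the 'force' propagation above, using hypothetical device
# types: if a parent device answers CreateOnSecondary() == False while one of
# its children answers True, the recursion still creates that child (and its
# own subtree) on the secondary node, but the parent itself is skipped,
# because 'force' only becomes True from the child's level downwards.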
3001 923b1523 Iustin Pop
def _GenerateUniqueNames(cfg, exts):
3002 923b1523 Iustin Pop
  """Generate a suitable LV name.
3003 923b1523 Iustin Pop

3004 923b1523 Iustin Pop
  This will generate logical volume names, one for each suffix in 'exts',
  each prefixed with a newly generated unique ID.
3005 923b1523 Iustin Pop

3006 923b1523 Iustin Pop
  """
3007 923b1523 Iustin Pop
  results = []
3008 923b1523 Iustin Pop
  for val in exts:
3009 923b1523 Iustin Pop
    new_id = cfg.GenerateUniqueID()
3010 923b1523 Iustin Pop
    results.append("%s%s" % (new_id, val))
3011 923b1523 Iustin Pop
  return results
3012 923b1523 Iustin Pop
3013 923b1523 Iustin Pop
3014 923b1523 Iustin Pop
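# Shape of the result, for illustration (the exact form of the unique ID is
# whatever cfg.GenerateUniqueID() returns):
#   _GenerateUniqueNames(cfg, [".sda", ".sdb"])
#   => ["<unique-id>.sda", "<unique-id>.sdb"]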
def _GenerateMDDRBDBranch(cfg, primary, secondary, size, names):
3015 a8083063 Iustin Pop
  """Generate a drbd device complete with its children.
3016 a8083063 Iustin Pop

3017 a8083063 Iustin Pop
  """
3018 a8083063 Iustin Pop
  port = cfg.AllocatePort()
3019 923b1523 Iustin Pop
  vgname = cfg.GetVGName()
3020 fe96220b Iustin Pop
  dev_data = objects.Disk(dev_type=constants.LD_LV, size=size,
3021 923b1523 Iustin Pop
                          logical_id=(vgname, names[0]))
3022 fe96220b Iustin Pop
  dev_meta = objects.Disk(dev_type=constants.LD_LV, size=128,
3023 923b1523 Iustin Pop
                          logical_id=(vgname, names[1]))
3024 fe96220b Iustin Pop
  drbd_dev = objects.Disk(dev_type=constants.LD_DRBD7, size=size,
3025 a8083063 Iustin Pop
                          logical_id = (primary, secondary, port),
3026 a8083063 Iustin Pop
                          children = [dev_data, dev_meta])
3027 a8083063 Iustin Pop
  return drbd_dev
3028 a8083063 Iustin Pop
3029 a8083063 Iustin Pop
3030 a1f445d3 Iustin Pop
def _GenerateDRBD8Branch(cfg, primary, secondary, size, names, iv_name):
3031 a1f445d3 Iustin Pop
  """Generate a drbd8 device complete with its children.
3032 a1f445d3 Iustin Pop

3033 a1f445d3 Iustin Pop
  """
3034 a1f445d3 Iustin Pop
  port = cfg.AllocatePort()
3035 a1f445d3 Iustin Pop
  vgname = cfg.GetVGName()
3036 a1f445d3 Iustin Pop
  dev_data = objects.Disk(dev_type=constants.LD_LV, size=size,
3037 a1f445d3 Iustin Pop
                          logical_id=(vgname, names[0]))
3038 a1f445d3 Iustin Pop
  dev_meta = objects.Disk(dev_type=constants.LD_LV, size=128,
3039 a1f445d3 Iustin Pop
                          logical_id=(vgname, names[1]))
3040 a1f445d3 Iustin Pop
  drbd_dev = objects.Disk(dev_type=constants.LD_DRBD8, size=size,
3041 a1f445d3 Iustin Pop
                          logical_id = (primary, secondary, port),
3042 a1f445d3 Iustin Pop
                          children = [dev_data, dev_meta],
3043 a1f445d3 Iustin Pop
                          iv_name=iv_name)
3044 a1f445d3 Iustin Pop
  return drbd_dev
3045 a1f445d3 Iustin Pop
3046 7c0d6283 Michael Hanselmann
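# Device tree produced by _GenerateDRBD8Branch, sketched for illustration
# (sizes in MB; the meta LV size of 128 is hard-coded above):
#
#   DRBD8  size=size  logical_id=(primary, secondary, port)  iv_name=iv_name
#     +-- LV data  size=size  logical_id=(vgname, names[0])
#     +-- LV meta  size=128   logical_id=(vgname, names[1])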
3047 923b1523 Iustin Pop
def _GenerateDiskTemplate(cfg, template_name,
3048 a8083063 Iustin Pop
                          instance_name, primary_node,
3049 0f1a06e3 Manuel Franceschini
                          secondary_nodes, disk_sz, swap_sz,
3050 0f1a06e3 Manuel Franceschini
                          file_storage_dir, file_driver):
3051 a8083063 Iustin Pop
  """Generate the entire disk layout for a given template type.
3052 a8083063 Iustin Pop

3053 a8083063 Iustin Pop
  """
3054 a8083063 Iustin Pop
  #TODO: compute space requirements
3055 a8083063 Iustin Pop
3056 923b1523 Iustin Pop
  vgname = cfg.GetVGName()
3057 3517d9b9 Manuel Franceschini
  if template_name == constants.DT_DISKLESS:
3058 a8083063 Iustin Pop
    disks = []
3059 3517d9b9 Manuel Franceschini
  elif template_name == constants.DT_PLAIN:
3060 a8083063 Iustin Pop
    if len(secondary_nodes) != 0:
3061 a8083063 Iustin Pop
      raise errors.ProgrammerError("Wrong template configuration")
3062 923b1523 Iustin Pop
3063 923b1523 Iustin Pop
    names = _GenerateUniqueNames(cfg, [".sda", ".sdb"])
3064 fe96220b Iustin Pop
    sda_dev = objects.Disk(dev_type=constants.LD_LV, size=disk_sz,
3065 923b1523 Iustin Pop
                           logical_id=(vgname, names[0]),
3066 a8083063 Iustin Pop
                           iv_name = "sda")
3067 fe96220b Iustin Pop
    sdb_dev = objects.Disk(dev_type=constants.LD_LV, size=swap_sz,
3068 923b1523 Iustin Pop
                           logical_id=(vgname, names[1]),
3069 a8083063 Iustin Pop
                           iv_name = "sdb")
3070 a8083063 Iustin Pop
    disks = [sda_dev, sdb_dev]
3071 a1f445d3 Iustin Pop
  elif template_name == constants.DT_DRBD8:
3072 a1f445d3 Iustin Pop
    if len(secondary_nodes) != 1:
3073 a1f445d3 Iustin Pop
      raise errors.ProgrammerError("Wrong template configuration")
3074 a1f445d3 Iustin Pop
    remote_node = secondary_nodes[0]
3075 a1f445d3 Iustin Pop
    names = _GenerateUniqueNames(cfg, [".sda_data", ".sda_meta",
3076 a1f445d3 Iustin Pop
                                       ".sdb_data", ".sdb_meta"])
3077 a1f445d3 Iustin Pop
    drbd_sda_dev = _GenerateDRBD8Branch(cfg, primary_node, remote_node,
3078 a1f445d3 Iustin Pop
                                         disk_sz, names[0:2], "sda")
3079 a1f445d3 Iustin Pop
    drbd_sdb_dev = _GenerateDRBD8Branch(cfg, primary_node, remote_node,
3080 a1f445d3 Iustin Pop
                                         swap_sz, names[2:4], "sdb")
3081 a1f445d3 Iustin Pop
    disks = [drbd_sda_dev, drbd_sdb_dev]
3082 0f1a06e3 Manuel Franceschini
  elif template_name == constants.DT_FILE:
3083 0f1a06e3 Manuel Franceschini
    if len(secondary_nodes) != 0:
3084 0f1a06e3 Manuel Franceschini
      raise errors.ProgrammerError("Wrong template configuration")
3085 0f1a06e3 Manuel Franceschini
3086 0f1a06e3 Manuel Franceschini
    file_sda_dev = objects.Disk(dev_type=constants.LD_FILE, size=disk_sz,
3087 0f1a06e3 Manuel Franceschini
                                iv_name="sda", logical_id=(file_driver,
3088 0f1a06e3 Manuel Franceschini
                                "%s/sda" % file_storage_dir))
3089 0f1a06e3 Manuel Franceschini
    file_sdb_dev = objects.Disk(dev_type=constants.LD_FILE, size=swap_sz,
3090 0f1a06e3 Manuel Franceschini
                                iv_name="sdb", logical_id=(file_driver,
3091 0f1a06e3 Manuel Franceschini
                                "%s/sdb" % file_storage_dir))
3092 0f1a06e3 Manuel Franceschini
    disks = [file_sda_dev, file_sdb_dev]
3093 a8083063 Iustin Pop
  else:
3094 a8083063 Iustin Pop
    raise errors.ProgrammerError("Invalid disk template '%s'" % template_name)
3095 a8083063 Iustin Pop
  return disks
3096 a8083063 Iustin Pop
3097 a8083063 Iustin Pop
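# Summary of the layouts generated above, per template (illustration only):
#   DT_DISKLESS -> no disks
#   DT_PLAIN    -> two plain LVs: "sda" (disk_sz) and "sdb" (swap_sz)
#   DT_DRBD8    -> two DRBD8 trees, each backed by a data LV and a meta LV
#   DT_FILE     -> two file-backed disks under file_storage_dir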
3098 a0c3fea1 Michael Hanselmann
def _GetInstanceInfoText(instance):
3099 3ecf6786 Iustin Pop
  """Compute that text that should be added to the disk's metadata.
3100 3ecf6786 Iustin Pop

3101 3ecf6786 Iustin Pop
  """
3102 a0c3fea1 Michael Hanselmann
  return "originstname+%s" % instance.name
3103 a0c3fea1 Michael Hanselmann
3104 a0c3fea1 Michael Hanselmann
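# For example, an instance named "instance1.example.com" (made-up name) gets
# the metadata text "originstname+instance1.example.com".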
3105 a8083063 Iustin Pop
def _CreateDisks(cfg, instance):
3106 a8083063 Iustin Pop
  """Create all disks for an instance.
3107 a8083063 Iustin Pop

3108 a8083063 Iustin Pop
  This abstracts away some work from AddInstance.
3109 a8083063 Iustin Pop

3110 a8083063 Iustin Pop
  Args:
3111 a8083063 Iustin Pop
    instance: the instance object
3112 a8083063 Iustin Pop

3113 a8083063 Iustin Pop
  Returns:
3114 a8083063 Iustin Pop
    True or False showing the success of the creation process
3115 a8083063 Iustin Pop

3116 a8083063 Iustin Pop
  """
3117 a0c3fea1 Michael Hanselmann
  info = _GetInstanceInfoText(instance)
3118 a0c3fea1 Michael Hanselmann
3119 0f1a06e3 Manuel Franceschini
  if instance.disk_template == constants.DT_FILE:
3120 0f1a06e3 Manuel Franceschini
    file_storage_dir = os.path.dirname(instance.disks[0].logical_id[1])
3121 0f1a06e3 Manuel Franceschini
    result = rpc.call_file_storage_dir_create(instance.primary_node,
3122 0f1a06e3 Manuel Franceschini
                                              file_storage_dir)
3123 0f1a06e3 Manuel Franceschini
3124 0f1a06e3 Manuel Franceschini
    if not result:
3125 b62ddbe5 Guido Trotter
      logger.Error("Could not connect to node '%s'" % instance.primary_node)
3126 0f1a06e3 Manuel Franceschini
      return False
3127 0f1a06e3 Manuel Franceschini
3128 0f1a06e3 Manuel Franceschini
    if not result[0]:
3129 0f1a06e3 Manuel Franceschini
      logger.Error("failed to create directory '%s'" % file_storage_dir)
3130 0f1a06e3 Manuel Franceschini
      return False
3131 0f1a06e3 Manuel Franceschini
3132 a8083063 Iustin Pop
  for device in instance.disks:
3133 a8083063 Iustin Pop
    logger.Info("creating volume %s for instance %s" %
3134 1c6e3627 Manuel Franceschini
                (device.iv_name, instance.name))
3135 a8083063 Iustin Pop
    #HARDCODE
3136 a8083063 Iustin Pop
    for secondary_node in instance.secondary_nodes:
3137 3f78eef2 Iustin Pop
      if not _CreateBlockDevOnSecondary(cfg, secondary_node, instance,
3138 3f78eef2 Iustin Pop
                                        device, False, info):
3139 a8083063 Iustin Pop
        logger.Error("failed to create volume %s (%s) on secondary node %s!" %
3140 a8083063 Iustin Pop
                     (device.iv_name, device, secondary_node))
3141 a8083063 Iustin Pop
        return False
3142 a8083063 Iustin Pop
    #HARDCODE
3143 3f78eef2 Iustin Pop
    if not _CreateBlockDevOnPrimary(cfg, instance.primary_node,
3144 3f78eef2 Iustin Pop
                                    instance, device, info):
3145 a8083063 Iustin Pop
      logger.Error("failed to create volume %s on primary!" %
3146 a8083063 Iustin Pop
                   device.iv_name)
3147 a8083063 Iustin Pop
      return False
3148 1c6e3627 Manuel Franceschini
3149 a8083063 Iustin Pop
  return True
3150 a8083063 Iustin Pop
3151 a8083063 Iustin Pop
3152 a8083063 Iustin Pop
def _RemoveDisks(instance, cfg):
3153 a8083063 Iustin Pop
  """Remove all disks for an instance.
3154 a8083063 Iustin Pop

3155 a8083063 Iustin Pop
  This abstracts away some work from `AddInstance()` and
3156 a8083063 Iustin Pop
  `RemoveInstance()`. Note that in case some of the devices couldn't
3157 1d67656e Iustin Pop
  be removed, the removal will continue with the other ones (compare
3158 a8083063 Iustin Pop
  with `_CreateDisks()`).
3159 a8083063 Iustin Pop

3160 a8083063 Iustin Pop
  Args:
3161 a8083063 Iustin Pop
    instance: the instance object
3162 a8083063 Iustin Pop

3163 a8083063 Iustin Pop
  Returns:
3164 a8083063 Iustin Pop
    True or False showing the success of the removal proces
3165 a8083063 Iustin Pop

3166 a8083063 Iustin Pop
  """
3167 a8083063 Iustin Pop
  logger.Info("removing block devices for instance %s" % instance.name)
3168 a8083063 Iustin Pop
3169 a8083063 Iustin Pop
  result = True
3170 a8083063 Iustin Pop
  for device in instance.disks:
3171 a8083063 Iustin Pop
    for node, disk in device.ComputeNodeTree(instance.primary_node):
3172 a8083063 Iustin Pop
      cfg.SetDiskID(disk, node)
3173 a8083063 Iustin Pop
      if not rpc.call_blockdev_remove(node, disk):
3174 a8083063 Iustin Pop
        logger.Error("could not remove block device %s on node %s,"
3175 a8083063 Iustin Pop
                     " continuing anyway" %
3176 a8083063 Iustin Pop
                     (device.iv_name, node))
3177 a8083063 Iustin Pop
        result = False
3178 0f1a06e3 Manuel Franceschini
3179 0f1a06e3 Manuel Franceschini
  if instance.disk_template == constants.DT_FILE:
3180 0f1a06e3 Manuel Franceschini
    file_storage_dir = os.path.dirname(instance.disks[0].logical_id[1])
3181 0f1a06e3 Manuel Franceschini
    if not rpc.call_file_storage_dir_remove(instance.primary_node,
3182 0f1a06e3 Manuel Franceschini
                                            file_storage_dir):
3183 0f1a06e3 Manuel Franceschini
      logger.Error("could not remove directory '%s'" % file_storage_dir)
3184 0f1a06e3 Manuel Franceschini
      result = False
3185 0f1a06e3 Manuel Franceschini
3186 a8083063 Iustin Pop
  return result
3187 a8083063 Iustin Pop
3188 a8083063 Iustin Pop
3189 e2fe6369 Iustin Pop
def _ComputeDiskSize(disk_template, disk_size, swap_size):
3190 e2fe6369 Iustin Pop
  """Compute disk size requirements in the volume group
3191 e2fe6369 Iustin Pop

3192 e2fe6369 Iustin Pop
  This is currently hard-coded for the two-drive layout.
3193 e2fe6369 Iustin Pop

3194 e2fe6369 Iustin Pop
  """
3195 e2fe6369 Iustin Pop
  # Required free disk space as a function of disk and swap space
3196 e2fe6369 Iustin Pop
  req_size_dict = {
3197 e2fe6369 Iustin Pop
    constants.DT_DISKLESS: None,
3198 e2fe6369 Iustin Pop
    constants.DT_PLAIN: disk_size + swap_size,
3199 e2fe6369 Iustin Pop
    # 256 MB are added for drbd metadata, 128 MB for each drbd device
3200 e2fe6369 Iustin Pop
    constants.DT_DRBD8: disk_size + swap_size + 256,
3201 e2fe6369 Iustin Pop
    constants.DT_FILE: None,
3202 e2fe6369 Iustin Pop
  }
3203 e2fe6369 Iustin Pop
3204 e2fe6369 Iustin Pop
  if disk_template not in req_size_dict:
3205 e2fe6369 Iustin Pop
    raise errors.ProgrammerError("Disk template '%s' size requirement"
3206 e2fe6369 Iustin Pop
                                 " is unknown" %  disk_template)
3207 e2fe6369 Iustin Pop
3208 e2fe6369 Iustin Pop
  return req_size_dict[disk_template]
3209 e2fe6369 Iustin Pop
3210 e2fe6369 Iustin Pop
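# Worked example (illustrative): a DT_DRBD8 instance with disk_size=10240 and
# swap_size=4096 needs 10240 + 4096 + 256 = 14592 MB of free space in the
# volume group of each node, while DT_DISKLESS and DT_FILE need none (None).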
3211 a8083063 Iustin Pop
class LUCreateInstance(LogicalUnit):
3212 a8083063 Iustin Pop
  """Create an instance.
3213 a8083063 Iustin Pop

3214 a8083063 Iustin Pop
  """
3215 a8083063 Iustin Pop
  HPATH = "instance-add"
3216 a8083063 Iustin Pop
  HTYPE = constants.HTYPE_INSTANCE
3217 538475ca Iustin Pop
  _OP_REQP = ["instance_name", "mem_size", "disk_size",
3218 a8083063 Iustin Pop
              "disk_template", "swap_size", "mode", "start", "vcpus",
3219 1862d460 Alexander Schreiber
              "wait_for_sync", "ip_check", "mac"]
3220 a8083063 Iustin Pop
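  # Minimal usage sketch (editor's illustration; the opcode class name is
  # assumed from the opcodes module and all values below are made up):
  #
  #   op = opcodes.OpCreateInstance(instance_name="instance1.example.com",
  #                                 mem_size=512, disk_size=10240,
  #                                 swap_size=4096, vcpus=1,
  #                                 disk_template=constants.DT_DRBD8,
  #                                 mode=constants.INSTANCE_CREATE,
  #                                 start=True, wait_for_sync=True,
  #                                 ip_check=True, mac="auto")
  #
  # Further fields (os_type, pnode or iallocator, bridge, ip, ...) are
  # validated in CheckPrereq() below; e.g. os_type is mandatory in
  # INSTANCE_CREATE mode and exactly one of pnode/iallocator must be given.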
3221 538475ca Iustin Pop
  def _RunAllocator(self):
3222 538475ca Iustin Pop
    """Run the allocator based on input opcode.
3223 538475ca Iustin Pop

3224 538475ca Iustin Pop
    """
3225 538475ca Iustin Pop
    disks = [{"size": self.op.disk_size, "mode": "w"},
3226 538475ca Iustin Pop
             {"size": self.op.swap_size, "mode": "w"}]
3227 538475ca Iustin Pop
    nics = [{"mac": self.op.mac, "ip": getattr(self.op, "ip", None),
3228 538475ca Iustin Pop
             "bridge": self.op.bridge}]
3229 d1c2dd75 Iustin Pop
    ial = IAllocator(self.cfg, self.sstore,
3230 29859cb7 Iustin Pop
                     mode=constants.IALLOCATOR_MODE_ALLOC,
3231 d1c2dd75 Iustin Pop
                     name=self.op.instance_name,
3232 d1c2dd75 Iustin Pop
                     disk_template=self.op.disk_template,
3233 d1c2dd75 Iustin Pop
                     tags=[],
3234 d1c2dd75 Iustin Pop
                     os=self.op.os_type,
3235 d1c2dd75 Iustin Pop
                     vcpus=self.op.vcpus,
3236 d1c2dd75 Iustin Pop
                     mem_size=self.op.mem_size,
3237 d1c2dd75 Iustin Pop
                     disks=disks,
3238 d1c2dd75 Iustin Pop
                     nics=nics,
3239 29859cb7 Iustin Pop
                     )
3240 d1c2dd75 Iustin Pop
3241 d1c2dd75 Iustin Pop
    ial.Run(self.op.iallocator)
3242 d1c2dd75 Iustin Pop
3243 d1c2dd75 Iustin Pop
    if not ial.success:
3244 538475ca Iustin Pop
      raise errors.OpPrereqError("Can't compute nodes using"
3245 538475ca Iustin Pop
                                 " iallocator '%s': %s" % (self.op.iallocator,
3246 d1c2dd75 Iustin Pop
                                                           ial.info))
3247 27579978 Iustin Pop
    if len(ial.nodes) != ial.required_nodes:
3248 538475ca Iustin Pop
      raise errors.OpPrereqError("iallocator '%s' returned invalid number"
3249 538475ca Iustin Pop
                                 " of nodes (%s), required %s" %
3250 27579978 Iustin Pop
                                 (self.op.iallocator,
                                  len(ial.nodes), ial.required_nodes))
3251 d1c2dd75 Iustin Pop
    self.op.pnode = ial.nodes[0]
3252 538475ca Iustin Pop
    logger.ToStdout("Selected nodes for the instance: %s" %
3253 d1c2dd75 Iustin Pop
                    (", ".join(ial.nodes),))
3254 538475ca Iustin Pop
    logger.Info("Selected nodes for instance %s via iallocator %s: %s" %
3255 d1c2dd75 Iustin Pop
                (self.op.instance_name, self.op.iallocator, ial.nodes))
3256 27579978 Iustin Pop
    if ial.required_nodes == 2:
3257 d1c2dd75 Iustin Pop
      self.op.snode = ial.nodes[1]
3258 538475ca Iustin Pop
3259 a8083063 Iustin Pop
  def BuildHooksEnv(self):
3260 a8083063 Iustin Pop
    """Build hooks env.
3261 a8083063 Iustin Pop

3262 a8083063 Iustin Pop
    This runs on master, primary and secondary nodes of the instance.
3263 a8083063 Iustin Pop

3264 a8083063 Iustin Pop
    """
3265 a8083063 Iustin Pop
    env = {
3266 396e1b78 Michael Hanselmann
      "INSTANCE_DISK_TEMPLATE": self.op.disk_template,
3267 396e1b78 Michael Hanselmann
      "INSTANCE_DISK_SIZE": self.op.disk_size,
3268 396e1b78 Michael Hanselmann
      "INSTANCE_SWAP_SIZE": self.op.swap_size,
3269 a8083063 Iustin Pop
      "INSTANCE_ADD_MODE": self.op.mode,
3270 a8083063 Iustin Pop
      }
3271 a8083063 Iustin Pop
    if self.op.mode == constants.INSTANCE_IMPORT:
3272 396e1b78 Michael Hanselmann
      env["INSTANCE_SRC_NODE"] = self.op.src_node
3273 396e1b78 Michael Hanselmann
      env["INSTANCE_SRC_PATH"] = self.op.src_path
3274 396e1b78 Michael Hanselmann
      env["INSTANCE_SRC_IMAGE"] = self.src_image
3275 396e1b78 Michael Hanselmann
3276 396e1b78 Michael Hanselmann
    env.update(_BuildInstanceHookEnv(name=self.op.instance_name,
3277 396e1b78 Michael Hanselmann
      primary_node=self.op.pnode,
3278 396e1b78 Michael Hanselmann
      secondary_nodes=self.secondaries,
3279 396e1b78 Michael Hanselmann
      status=self.instance_status,
3280 ecb215b5 Michael Hanselmann
      os_type=self.op.os_type,
3281 396e1b78 Michael Hanselmann
      memory=self.op.mem_size,
3282 396e1b78 Michael Hanselmann
      vcpus=self.op.vcpus,
3283 c7b27e9e Iustin Pop
      nics=[(self.inst_ip, self.op.bridge, self.op.mac)],
3284 396e1b78 Michael Hanselmann
    ))
3285 a8083063 Iustin Pop
3286 880478f8 Iustin Pop
    nl = ([self.sstore.GetMasterNode(), self.op.pnode] +
3287 a8083063 Iustin Pop
          self.secondaries)
3288 a8083063 Iustin Pop
    return env, nl, nl
3289 a8083063 Iustin Pop
3290 a8083063 Iustin Pop
3291 a8083063 Iustin Pop
  def CheckPrereq(self):
3292 a8083063 Iustin Pop
    """Check prerequisites.
3293 a8083063 Iustin Pop

3294 a8083063 Iustin Pop
    """
3295 538475ca Iustin Pop
    # set optional parameters to None if they don't exist
3296 538475ca Iustin Pop
    for attr in ["kernel_path", "initrd_path", "hvm_boot_order", "pnode",
3297 31a853d2 Iustin Pop
                 "iallocator", "hvm_acpi", "hvm_pae", "hvm_cdrom_image_path",
3298 31a853d2 Iustin Pop
                 "vnc_bind_address"]:
3299 40ed12dd Guido Trotter
      if not hasattr(self.op, attr):
3300 40ed12dd Guido Trotter
        setattr(self.op, attr, None)
3301 40ed12dd Guido Trotter
3302 a8083063 Iustin Pop
    if self.op.mode not in (constants.INSTANCE_CREATE,
3303 a8083063 Iustin Pop
                            constants.INSTANCE_IMPORT):
3304 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("Invalid instance creation mode '%s'" %
3305 3ecf6786 Iustin Pop
                                 self.op.mode)
3306 a8083063 Iustin Pop
3307 eedc99de Manuel Franceschini
    if (not self.cfg.GetVGName() and
3308 eedc99de Manuel Franceschini
        self.op.disk_template not in constants.DTS_NOT_LVM):
3309 eedc99de Manuel Franceschini
      raise errors.OpPrereqError("Cluster does not support lvm-based"
3310 eedc99de Manuel Franceschini
                                 " instances")
3311 eedc99de Manuel Franceschini
3312 a8083063 Iustin Pop
    if self.op.mode == constants.INSTANCE_IMPORT:
3313 a8083063 Iustin Pop
      src_node = getattr(self.op, "src_node", None)
3314 a8083063 Iustin Pop
      src_path = getattr(self.op, "src_path", None)
3315 a8083063 Iustin Pop
      if src_node is None or src_path is None:
3316 3ecf6786 Iustin Pop
        raise errors.OpPrereqError("Importing an instance requires source"
3317 3ecf6786 Iustin Pop
                                   " node and path options")
3318 a8083063 Iustin Pop
      src_node_full = self.cfg.ExpandNodeName(src_node)
3319 a8083063 Iustin Pop
      if src_node_full is None:
3320 3ecf6786 Iustin Pop
        raise errors.OpPrereqError("Unknown source node '%s'" % src_node)
3321 a8083063 Iustin Pop
      self.op.src_node = src_node = src_node_full
3322 a8083063 Iustin Pop
3323 a8083063 Iustin Pop
      if not os.path.isabs(src_path):
3324 3ecf6786 Iustin Pop
        raise errors.OpPrereqError("The source path must be absolute")
3325 a8083063 Iustin Pop
3326 a8083063 Iustin Pop
      export_info = rpc.call_export_info(src_node, src_path)
3327 a8083063 Iustin Pop
3328 a8083063 Iustin Pop
      if not export_info:
3329 3ecf6786 Iustin Pop
        raise errors.OpPrereqError("No export found in dir %s" % src_path)
3330 a8083063 Iustin Pop
3331 a8083063 Iustin Pop
      if not export_info.has_section(constants.INISECT_EXP):
3332 3ecf6786 Iustin Pop
        raise errors.ProgrammerError("Corrupted export config")
3333 a8083063 Iustin Pop
3334 a8083063 Iustin Pop
      ei_version = export_info.get(constants.INISECT_EXP, 'version')
3335 a8083063 Iustin Pop
      if (int(ei_version) != constants.EXPORT_VERSION):
3336 3ecf6786 Iustin Pop
        raise errors.OpPrereqError("Wrong export version %s (wanted %d)" %
3337 3ecf6786 Iustin Pop
                                   (ei_version, constants.EXPORT_VERSION))
3338 a8083063 Iustin Pop
3339 a8083063 Iustin Pop
      if int(export_info.get(constants.INISECT_INS, 'disk_count')) > 1:
3340 3ecf6786 Iustin Pop
        raise errors.OpPrereqError("Can't import instance with more than"
3341 3ecf6786 Iustin Pop
                                   " one data disk")
3342 a8083063 Iustin Pop
3343 a8083063 Iustin Pop
      # FIXME: are the old os-es, disk sizes, etc. useful?
3344 a8083063 Iustin Pop
      self.op.os_type = export_info.get(constants.INISECT_EXP, 'os')
3345 a8083063 Iustin Pop
      diskimage = os.path.join(src_path, export_info.get(constants.INISECT_INS,
3346 a8083063 Iustin Pop
                                                         'disk0_dump'))
3347 a8083063 Iustin Pop
      self.src_image = diskimage
3348 a8083063 Iustin Pop
    else: # INSTANCE_CREATE
3349 a8083063 Iustin Pop
      if getattr(self.op, "os_type", None) is None:
3350 3ecf6786 Iustin Pop
        raise errors.OpPrereqError("No guest OS specified")
3351 a8083063 Iustin Pop
3352 901a65c1 Iustin Pop
    #### instance parameters check
3353 901a65c1 Iustin Pop
3354 a8083063 Iustin Pop
    # disk template and mirror node verification
3355 a8083063 Iustin Pop
    if self.op.disk_template not in constants.DISK_TEMPLATES:
3356 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("Invalid disk template name")
3357 a8083063 Iustin Pop
3358 901a65c1 Iustin Pop
    # instance name verification
3359 901a65c1 Iustin Pop
    hostname1 = utils.HostInfo(self.op.instance_name)
3360 901a65c1 Iustin Pop
3361 901a65c1 Iustin Pop
    self.op.instance_name = instance_name = hostname1.name
3362 901a65c1 Iustin Pop
    instance_list = self.cfg.GetInstanceList()
3363 901a65c1 Iustin Pop
    if instance_name in instance_list:
3364 901a65c1 Iustin Pop
      raise errors.OpPrereqError("Instance '%s' is already in the cluster" %
3365 901a65c1 Iustin Pop
                                 instance_name)
3366 901a65c1 Iustin Pop
3367 901a65c1 Iustin Pop
    # ip validity checks
3368 901a65c1 Iustin Pop
    ip = getattr(self.op, "ip", None)
3369 901a65c1 Iustin Pop
    if ip is None or ip.lower() == "none":
3370 901a65c1 Iustin Pop
      inst_ip = None
3371 901a65c1 Iustin Pop
    elif ip.lower() == "auto":
3372 901a65c1 Iustin Pop
      inst_ip = hostname1.ip
3373 901a65c1 Iustin Pop
    else:
3374 901a65c1 Iustin Pop
      if not utils.IsValidIP(ip):
3375 901a65c1 Iustin Pop
        raise errors.OpPrereqError("given IP address '%s' doesn't look"
3376 901a65c1 Iustin Pop
                                   " like a valid IP" % ip)
3377 901a65c1 Iustin Pop
      inst_ip = ip
3378 901a65c1 Iustin Pop
    self.inst_ip = self.op.ip = inst_ip
3379 901a65c1 Iustin Pop
3380 901a65c1 Iustin Pop
    if self.op.start and not self.op.ip_check:
3381 901a65c1 Iustin Pop
      raise errors.OpPrereqError("Cannot ignore IP address conflicts when"
3382 901a65c1 Iustin Pop
                                 " adding an instance in start mode")
3383 901a65c1 Iustin Pop
3384 901a65c1 Iustin Pop
    if self.op.ip_check:
3385 901a65c1 Iustin Pop
      if utils.TcpPing(hostname1.ip, constants.DEFAULT_NODED_PORT):
3386 901a65c1 Iustin Pop
        raise errors.OpPrereqError("IP %s of instance %s already in use" %
3387 901a65c1 Iustin Pop
                                   (hostname1.ip, instance_name))
3388 901a65c1 Iustin Pop
3389 901a65c1 Iustin Pop
    # MAC address verification
3390 901a65c1 Iustin Pop
    if self.op.mac != "auto":
3391 901a65c1 Iustin Pop
      if not utils.IsValidMac(self.op.mac.lower()):
3392 901a65c1 Iustin Pop
        raise errors.OpPrereqError("invalid MAC address specified: %s" %
3393 901a65c1 Iustin Pop
                                   self.op.mac)
3394 901a65c1 Iustin Pop
3395 901a65c1 Iustin Pop
    # bridge verification
3396 901a65c1 Iustin Pop
    bridge = getattr(self.op, "bridge", None)
3397 901a65c1 Iustin Pop
    if bridge is None:
3398 901a65c1 Iustin Pop
      self.op.bridge = self.cfg.GetDefBridge()
3399 901a65c1 Iustin Pop
    else:
3400 901a65c1 Iustin Pop
      self.op.bridge = bridge
3401 901a65c1 Iustin Pop
3402 901a65c1 Iustin Pop
    # boot order verification
3403 901a65c1 Iustin Pop
    if self.op.hvm_boot_order is not None:
3404 901a65c1 Iustin Pop
      if len(self.op.hvm_boot_order.strip("acdn")) != 0:
3405 901a65c1 Iustin Pop
        raise errors.OpPrereqError("invalid boot order specified,"
3406 901a65c1 Iustin Pop
                                   " must be one or more of [acdn]")
3407 901a65c1 Iustin Pop
    # file storage checks
3408 0f1a06e3 Manuel Franceschini
    if (self.op.file_driver and
3409 0f1a06e3 Manuel Franceschini
        not self.op.file_driver in constants.FILE_DRIVER):
3410 0f1a06e3 Manuel Franceschini
      raise errors.OpPrereqError("Invalid file driver name '%s'" %
3411 0f1a06e3 Manuel Franceschini
                                 self.op.file_driver)
3412 0f1a06e3 Manuel Franceschini
3413 0f1a06e3 Manuel Franceschini
    if self.op.file_storage_dir and os.path.isabs(self.op.file_storage_dir):
3414 b4de68a9 Iustin Pop
      raise errors.OpPrereqError("File storage directory not a relative"
3415 b4de68a9 Iustin Pop
                                 " path")
3416 538475ca Iustin Pop
    #### allocator run
3417 538475ca Iustin Pop
3418 538475ca Iustin Pop
    if [self.op.iallocator, self.op.pnode].count(None) != 1:
3419 538475ca Iustin Pop
      raise errors.OpPrereqError("One and only one of iallocator and primary"
3420 538475ca Iustin Pop
                                 " node must be given")
3421 538475ca Iustin Pop
3422 538475ca Iustin Pop
    if self.op.iallocator is not None:
3423 538475ca Iustin Pop
      self._RunAllocator()
3424 0f1a06e3 Manuel Franceschini
3425 901a65c1 Iustin Pop
    #### node related checks
3426 901a65c1 Iustin Pop
3427 901a65c1 Iustin Pop
    # check primary node
3428 901a65c1 Iustin Pop
    pnode = self.cfg.GetNodeInfo(self.cfg.ExpandNodeName(self.op.pnode))
3429 901a65c1 Iustin Pop
    if pnode is None:
3430 901a65c1 Iustin Pop
      raise errors.OpPrereqError("Primary node '%s' is unknown" %
3431 901a65c1 Iustin Pop
                                 self.op.pnode)
3432 901a65c1 Iustin Pop
    self.op.pnode = pnode.name
3433 901a65c1 Iustin Pop
    self.pnode = pnode
3434 901a65c1 Iustin Pop
    self.secondaries = []
3435 901a65c1 Iustin Pop
3436 901a65c1 Iustin Pop
    # mirror node verification
3437 a1f445d3 Iustin Pop
    if self.op.disk_template in constants.DTS_NET_MIRROR:
3438 a8083063 Iustin Pop
      if getattr(self.op, "snode", None) is None:
3439 a1f445d3 Iustin Pop
        raise errors.OpPrereqError("The networked disk templates need"
3440 3ecf6786 Iustin Pop
                                   " a mirror node")
3441 a8083063 Iustin Pop
3442 a8083063 Iustin Pop
      snode_name = self.cfg.ExpandNodeName(self.op.snode)
3443 a8083063 Iustin Pop
      if snode_name is None:
3444 3ecf6786 Iustin Pop
        raise errors.OpPrereqError("Unknown secondary node '%s'" %
3445 3ecf6786 Iustin Pop
                                   self.op.snode)
3446 a8083063 Iustin Pop
      elif snode_name == pnode.name:
3447 3ecf6786 Iustin Pop
        raise errors.OpPrereqError("The secondary node cannot be"
3448 3ecf6786 Iustin Pop
                                   " the primary node.")
3449 a8083063 Iustin Pop
      self.secondaries.append(snode_name)
3450 a8083063 Iustin Pop
3451 e2fe6369 Iustin Pop
    req_size = _ComputeDiskSize(self.op.disk_template,
3452 e2fe6369 Iustin Pop
                                self.op.disk_size, self.op.swap_size)
3453 ed1ebc60 Guido Trotter
3454 8d75db10 Iustin Pop
    # Check lv size requirements
3455 8d75db10 Iustin Pop
    if req_size is not None:
3456 8d75db10 Iustin Pop
      nodenames = [pnode.name] + self.secondaries
3457 8d75db10 Iustin Pop
      nodeinfo = rpc.call_node_info(nodenames, self.cfg.GetVGName())
3458 8d75db10 Iustin Pop
      for node in nodenames:
3459 8d75db10 Iustin Pop
        info = nodeinfo.get(node, None)
3460 8d75db10 Iustin Pop
        if not info:
3461 8d75db10 Iustin Pop
          raise errors.OpPrereqError("Cannot get current information"
3462 8d75db10 Iustin Pop
                                     " from node '%s'" % nodeinfo)
3463 8d75db10 Iustin Pop
        vg_free = info.get('vg_free', None)
3464 8d75db10 Iustin Pop
        if not isinstance(vg_free, int):
3465 8d75db10 Iustin Pop
          raise errors.OpPrereqError("Can't compute free disk space on"
3466 8d75db10 Iustin Pop
                                     " node %s" % node)
3467 8d75db10 Iustin Pop
        if req_size > info['vg_free']:
3468 8d75db10 Iustin Pop
          raise errors.OpPrereqError("Not enough disk space on target node %s."
3469 8d75db10 Iustin Pop
                                     " %d MB available, %d MB required" %
3470 8d75db10 Iustin Pop
                                     (node, info['vg_free'], req_size))
3471 ed1ebc60 Guido Trotter
3472 a8083063 Iustin Pop
    # os verification
3473 00fe9e38 Guido Trotter
    os_obj = rpc.call_os_get(pnode.name, self.op.os_type)
3474 dfa96ded Guido Trotter
    if not os_obj:
3475 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("OS '%s' not in supported os list for"
3476 3ecf6786 Iustin Pop
                                 " primary node"  % self.op.os_type)
3477 a8083063 Iustin Pop
3478 3b6d8c9b Iustin Pop
    if self.op.kernel_path == constants.VALUE_NONE:
3479 3b6d8c9b Iustin Pop
      raise errors.OpPrereqError("Can't set instance kernel to none")
3480 3b6d8c9b Iustin Pop
3481 a8083063 Iustin Pop
3482 901a65c1 Iustin Pop
    # bridge check on primary node
3483 a8083063 Iustin Pop
    if not rpc.call_bridges_exist(self.pnode.name, [self.op.bridge]):
3484 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("target bridge '%s' does not exist on"
3485 3ecf6786 Iustin Pop
                                 " destination node '%s'" %
3486 3ecf6786 Iustin Pop
                                 (self.op.bridge, pnode.name))
3487 a8083063 Iustin Pop
3488 49ce1563 Iustin Pop
    # memory check on primary node
3489 49ce1563 Iustin Pop
    if self.op.start:
3490 49ce1563 Iustin Pop
      _CheckNodeFreeMemory(self.cfg, self.pnode.name,
3491 49ce1563 Iustin Pop
                           "creating instance %s" % self.op.instance_name,
3492 49ce1563 Iustin Pop
                           self.op.mem_size)
3493 49ce1563 Iustin Pop
3494 31a853d2 Iustin Pop
    # hvm_cdrom_image_path verification
3495 31a853d2 Iustin Pop
    if self.op.hvm_cdrom_image_path is not None:
3496 31a853d2 Iustin Pop
      if not os.path.isabs(self.op.hvm_cdrom_image_path):
3497 31a853d2 Iustin Pop
        raise errors.OpPrereqError("The path to the HVM CDROM image must"
3498 31a853d2 Iustin Pop
                                   " be an absolute path or None, not %s" %
3499 31a853d2 Iustin Pop
                                   self.op.hvm_cdrom_image_path)
3500 31a853d2 Iustin Pop
      if not os.path.isfile(self.op.hvm_cdrom_image_path):
3501 31a853d2 Iustin Pop
        raise errors.OpPrereqError("The HVM CDROM image must either be a"
3502 31a853d2 Iustin Pop
                                   " regular file or a symlink pointing to"
3503 31a853d2 Iustin Pop
                                   " an existing regular file, not %s" %
3504 31a853d2 Iustin Pop
                                   self.op.hvm_cdrom_image_path)
3505 31a853d2 Iustin Pop
3506 31a853d2 Iustin Pop
    # vnc_bind_address verification
3507 31a853d2 Iustin Pop
    if self.op.vnc_bind_address is not None:
3508 31a853d2 Iustin Pop
      if not utils.IsValidIP(self.op.vnc_bind_address):
3509 31a853d2 Iustin Pop
        raise errors.OpPrereqError("given VNC bind address '%s' doesn't look"
3510 31a853d2 Iustin Pop
                                   " like a valid IP address" %
3511 31a853d2 Iustin Pop
                                   self.op.vnc_bind_address)
3512 31a853d2 Iustin Pop
3513 a8083063 Iustin Pop
    if self.op.start:
3514 a8083063 Iustin Pop
      self.instance_status = 'up'
3515 a8083063 Iustin Pop
    else:
3516 a8083063 Iustin Pop
      self.instance_status = 'down'
3517 a8083063 Iustin Pop
3518 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
3519 a8083063 Iustin Pop
    """Create and add the instance to the cluster.
3520 a8083063 Iustin Pop

3521 a8083063 Iustin Pop
    """
3522 a8083063 Iustin Pop
    instance = self.op.instance_name
3523 a8083063 Iustin Pop
    pnode_name = self.pnode.name
3524 a8083063 Iustin Pop
3525 1862d460 Alexander Schreiber
    if self.op.mac == "auto":
3526 ba4b62cf Iustin Pop
      mac_address = self.cfg.GenerateMAC()
3527 1862d460 Alexander Schreiber
    else:
3528 ba4b62cf Iustin Pop
      mac_address = self.op.mac
3529 1862d460 Alexander Schreiber
3530 1862d460 Alexander Schreiber
    nic = objects.NIC(bridge=self.op.bridge, mac=mac_address)
3531 a8083063 Iustin Pop
    if self.inst_ip is not None:
3532 a8083063 Iustin Pop
      nic.ip = self.inst_ip
3533 a8083063 Iustin Pop
3534 2a6469d5 Alexander Schreiber
    ht_kind = self.sstore.GetHypervisorType()
3535 2a6469d5 Alexander Schreiber
    if ht_kind in constants.HTS_REQ_PORT:
3536 2a6469d5 Alexander Schreiber
      network_port = self.cfg.AllocatePort()
3537 2a6469d5 Alexander Schreiber
    else:
3538 2a6469d5 Alexander Schreiber
      network_port = None
3539 58acb49d Alexander Schreiber
3540 31a853d2 Iustin Pop
    if self.op.vnc_bind_address is None:
3541 31a853d2 Iustin Pop
      self.op.vnc_bind_address = constants.VNC_DEFAULT_BIND_ADDRESS
3542 31a853d2 Iustin Pop
3543 2c313123 Manuel Franceschini
    # this is needed because os.path.join does not accept None arguments
3544 2c313123 Manuel Franceschini
    if self.op.file_storage_dir is None:
3545 2c313123 Manuel Franceschini
      string_file_storage_dir = ""
3546 2c313123 Manuel Franceschini
    else:
3547 2c313123 Manuel Franceschini
      string_file_storage_dir = self.op.file_storage_dir
3548 2c313123 Manuel Franceschini
3549 0f1a06e3 Manuel Franceschini
    # build the full file storage dir path
3550 0f1a06e3 Manuel Franceschini
    file_storage_dir = os.path.normpath(os.path.join(
3551 0f1a06e3 Manuel Franceschini
                                        self.sstore.GetFileStorageDir(),
3552 2c313123 Manuel Franceschini
                                        string_file_storage_dir, instance))
3553 0f1a06e3 Manuel Franceschini
3554 0f1a06e3 Manuel Franceschini
3555 923b1523 Iustin Pop
    disks = _GenerateDiskTemplate(self.cfg,
3556 a8083063 Iustin Pop
                                  self.op.disk_template,
3557 a8083063 Iustin Pop
                                  instance, pnode_name,
3558 a8083063 Iustin Pop
                                  self.secondaries, self.op.disk_size,
3559 0f1a06e3 Manuel Franceschini
                                  self.op.swap_size,
3560 0f1a06e3 Manuel Franceschini
                                  file_storage_dir,
3561 0f1a06e3 Manuel Franceschini
                                  self.op.file_driver)
3562 a8083063 Iustin Pop
3563 a8083063 Iustin Pop
    iobj = objects.Instance(name=instance, os=self.op.os_type,
3564 a8083063 Iustin Pop
                            primary_node=pnode_name,
3565 a8083063 Iustin Pop
                            memory=self.op.mem_size,
3566 a8083063 Iustin Pop
                            vcpus=self.op.vcpus,
3567 a8083063 Iustin Pop
                            nics=[nic], disks=disks,
3568 a8083063 Iustin Pop
                            disk_template=self.op.disk_template,
3569 a8083063 Iustin Pop
                            status=self.instance_status,
3570 58acb49d Alexander Schreiber
                            network_port=network_port,
3571 3b6d8c9b Iustin Pop
                            kernel_path=self.op.kernel_path,
3572 3b6d8c9b Iustin Pop
                            initrd_path=self.op.initrd_path,
3573 25c5878d Alexander Schreiber
                            hvm_boot_order=self.op.hvm_boot_order,
3574 31a853d2 Iustin Pop
                            hvm_acpi=self.op.hvm_acpi,
3575 31a853d2 Iustin Pop
                            hvm_pae=self.op.hvm_pae,
3576 31a853d2 Iustin Pop
                            hvm_cdrom_image_path=self.op.hvm_cdrom_image_path,
3577 31a853d2 Iustin Pop
                            vnc_bind_address=self.op.vnc_bind_address,
3578 a8083063 Iustin Pop
                            )
3579 a8083063 Iustin Pop
3580 a8083063 Iustin Pop
    feedback_fn("* creating instance disks...")
3581 a8083063 Iustin Pop
    if not _CreateDisks(self.cfg, iobj):
3582 a8083063 Iustin Pop
      _RemoveDisks(iobj, self.cfg)
3583 3ecf6786 Iustin Pop
      raise errors.OpExecError("Device creation failed, reverting...")
3584 a8083063 Iustin Pop
3585 a8083063 Iustin Pop
    feedback_fn("adding instance %s to cluster config" % instance)
3586 a8083063 Iustin Pop
3587 a8083063 Iustin Pop
    self.cfg.AddInstance(iobj)
3588 a8083063 Iustin Pop
3589 a8083063 Iustin Pop
    if self.op.wait_for_sync:
3590 5bfac263 Iustin Pop
      disk_abort = not _WaitForSync(self.cfg, iobj, self.proc)
3591 a1f445d3 Iustin Pop
    elif iobj.disk_template in constants.DTS_NET_MIRROR:
3592 a8083063 Iustin Pop
      # make sure the disks are not degraded (still sync-ing is ok)
3593 a8083063 Iustin Pop
      time.sleep(15)
3594 a8083063 Iustin Pop
      feedback_fn("* checking mirrors status")
3595 5bfac263 Iustin Pop
      disk_abort = not _WaitForSync(self.cfg, iobj, self.proc, oneshot=True)
3596 a8083063 Iustin Pop
    else:
3597 a8083063 Iustin Pop
      disk_abort = False
3598 a8083063 Iustin Pop
3599 a8083063 Iustin Pop
    if disk_abort:
3600 a8083063 Iustin Pop
      _RemoveDisks(iobj, self.cfg)
3601 a8083063 Iustin Pop
      self.cfg.RemoveInstance(iobj.name)
3602 3ecf6786 Iustin Pop
      raise errors.OpExecError("There are some degraded disks for"
3603 3ecf6786 Iustin Pop
                               " this instance")
3604 a8083063 Iustin Pop
3605 a8083063 Iustin Pop
    feedback_fn("creating os for instance %s on node %s" %
3606 a8083063 Iustin Pop
                (instance, pnode_name))
3607 a8083063 Iustin Pop
3608 a8083063 Iustin Pop
    if iobj.disk_template != constants.DT_DISKLESS:
3609 a8083063 Iustin Pop
      if self.op.mode == constants.INSTANCE_CREATE:
3610 a8083063 Iustin Pop
        feedback_fn("* running the instance OS create scripts...")
3611 a8083063 Iustin Pop
        if not rpc.call_instance_os_add(pnode_name, iobj, "sda", "sdb"):
3612 3ecf6786 Iustin Pop
          raise errors.OpExecError("could not add os for instance %s"
3613 3ecf6786 Iustin Pop
                                   " on node %s" %
3614 3ecf6786 Iustin Pop
                                   (instance, pnode_name))
3615 a8083063 Iustin Pop
3616 a8083063 Iustin Pop
      elif self.op.mode == constants.INSTANCE_IMPORT:
3617 a8083063 Iustin Pop
        feedback_fn("* running the instance OS import scripts...")
3618 a8083063 Iustin Pop
        src_node = self.op.src_node
3619 a8083063 Iustin Pop
        src_image = self.src_image
3620 a8083063 Iustin Pop
        if not rpc.call_instance_os_import(pnode_name, iobj, "sda", "sdb",
3621 a8083063 Iustin Pop
                                                src_node, src_image):
3622 3ecf6786 Iustin Pop
          raise errors.OpExecError("Could not import os for instance"
3623 3ecf6786 Iustin Pop
                                   " %s on node %s" %
3624 3ecf6786 Iustin Pop
                                   (instance, pnode_name))
3625 a8083063 Iustin Pop
      else:
3626 a8083063 Iustin Pop
        # also checked in the prereq part
3627 3ecf6786 Iustin Pop
        raise errors.ProgrammerError("Unknown OS initialization mode '%s'"
3628 3ecf6786 Iustin Pop
                                     % self.op.mode)
3629 a8083063 Iustin Pop
3630 a8083063 Iustin Pop
    if self.op.start:
3631 a8083063 Iustin Pop
      logger.Info("starting instance %s on node %s" % (instance, pnode_name))
3632 a8083063 Iustin Pop
      feedback_fn("* starting instance...")
3633 a8083063 Iustin Pop
      if not rpc.call_instance_start(pnode_name, iobj, None):
3634 3ecf6786 Iustin Pop
        raise errors.OpExecError("Could not start instance")
3635 a8083063 Iustin Pop
3636 a8083063 Iustin Pop
3637 a8083063 Iustin Pop
class LUConnectConsole(NoHooksLU):
3638 a8083063 Iustin Pop
  """Connect to an instance's console.
3639 a8083063 Iustin Pop

3640 a8083063 Iustin Pop
  This is somewhat special in that it returns the command line that
3641 a8083063 Iustin Pop
  you need to run on the master node in order to connect to the
3642 a8083063 Iustin Pop
  console.
3643 a8083063 Iustin Pop

3644 a8083063 Iustin Pop
  """
3645 a8083063 Iustin Pop
  _OP_REQP = ["instance_name"]
3646 a8083063 Iustin Pop
3647 a8083063 Iustin Pop
  def CheckPrereq(self):
3648 a8083063 Iustin Pop
    """Check prerequisites.
3649 a8083063 Iustin Pop

3650 a8083063 Iustin Pop
    This checks that the instance is in the cluster.
3651 a8083063 Iustin Pop

3652 a8083063 Iustin Pop
    """
3653 a8083063 Iustin Pop
    instance = self.cfg.GetInstanceInfo(
3654 a8083063 Iustin Pop
      self.cfg.ExpandInstanceName(self.op.instance_name))
3655 a8083063 Iustin Pop
    if instance is None:
3656 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("Instance '%s' not known" %
3657 3ecf6786 Iustin Pop
                                 self.op.instance_name)
3658 a8083063 Iustin Pop
    self.instance = instance
3659 a8083063 Iustin Pop
3660 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
3661 a8083063 Iustin Pop
    """Connect to the console of an instance
3662 a8083063 Iustin Pop

3663 a8083063 Iustin Pop
    """
3664 a8083063 Iustin Pop
    instance = self.instance
3665 a8083063 Iustin Pop
    node = instance.primary_node
3666 a8083063 Iustin Pop
3667 a8083063 Iustin Pop
    node_insts = rpc.call_instance_list([node])[node]
3668 a8083063 Iustin Pop
    if node_insts is False:
3669 3ecf6786 Iustin Pop
      raise errors.OpExecError("Can't connect to node %s." % node)
3670 a8083063 Iustin Pop
3671 a8083063 Iustin Pop
    if instance.name not in node_insts:
3672 3ecf6786 Iustin Pop
      raise errors.OpExecError("Instance %s is not running." % instance.name)
3673 a8083063 Iustin Pop
3674 a8083063 Iustin Pop
    logger.Debug("connecting to console of %s on %s" % (instance.name, node))
3675 a8083063 Iustin Pop
3676 a8083063 Iustin Pop
    hyper = hypervisor.GetHypervisor()
3677 30989e69 Alexander Schreiber
    console_cmd = hyper.GetShellCommandForConsole(instance)
3678 b047857b Michael Hanselmann
3679 82122173 Iustin Pop
    # build ssh cmdline
3680 0a80a26f Michael Hanselmann
    return self.ssh.BuildCmd(node, "root", console_cmd, batch=True, tty=True)
3681 a8083063 Iustin Pop
3682 a8083063 Iustin Pop
3683 a8083063 Iustin Pop
class LUReplaceDisks(LogicalUnit):
3684 a8083063 Iustin Pop
  """Replace the disks of an instance.
3685 a8083063 Iustin Pop

3686 a8083063 Iustin Pop
  """
3687 a8083063 Iustin Pop
  HPATH = "mirrors-replace"
3688 a8083063 Iustin Pop
  HTYPE = constants.HTYPE_INSTANCE
3689 a9e0c397 Iustin Pop
  _OP_REQP = ["instance_name", "mode", "disks"]
3690 a8083063 Iustin Pop
3691 b6e82a65 Iustin Pop
  def _RunAllocator(self):
3692 b6e82a65 Iustin Pop
    """Compute a new secondary node using an IAllocator.
3693 b6e82a65 Iustin Pop

3694 b6e82a65 Iustin Pop
    """
3695 b6e82a65 Iustin Pop
    ial = IAllocator(self.cfg, self.sstore,
3696 b6e82a65 Iustin Pop
                     mode=constants.IALLOCATOR_MODE_RELOC,
3697 b6e82a65 Iustin Pop
                     name=self.op.instance_name,
3698 b6e82a65 Iustin Pop
                     relocate_from=[self.sec_node])
3699 b6e82a65 Iustin Pop
3700 b6e82a65 Iustin Pop
    ial.Run(self.op.iallocator)
3701 b6e82a65 Iustin Pop
3702 b6e82a65 Iustin Pop
    if not ial.success:
3703 b6e82a65 Iustin Pop
      raise errors.OpPrereqError("Can't compute nodes using"
3704 b6e82a65 Iustin Pop
                                 " iallocator '%s': %s" % (self.op.iallocator,
3705 b6e82a65 Iustin Pop
                                                           ial.info))
3706 b6e82a65 Iustin Pop
    if len(ial.nodes) != ial.required_nodes:
3707 b6e82a65 Iustin Pop
      raise errors.OpPrereqError("iallocator '%s' returned invalid number"
3708 b6e82a65 Iustin Pop
                                 " of nodes (%s), required %s" %
3709 b6e82a65 Iustin Pop
                                 (self.op.iallocator,
                                  len(ial.nodes), ial.required_nodes))
3710 b6e82a65 Iustin Pop
    self.op.remote_node = ial.nodes[0]
3711 b6e82a65 Iustin Pop
    logger.ToStdout("Selected new secondary for the instance: %s" %
3712 b6e82a65 Iustin Pop
                    self.op.remote_node)
3713 b6e82a65 Iustin Pop
3714 a8083063 Iustin Pop
  def BuildHooksEnv(self):
3715 a8083063 Iustin Pop
    """Build hooks env.
3716 a8083063 Iustin Pop

3717 a8083063 Iustin Pop
    This runs on the master, the primary and all the secondaries.
3718 a8083063 Iustin Pop

3719 a8083063 Iustin Pop
    """
3720 a8083063 Iustin Pop
    env = {
3721 a9e0c397 Iustin Pop
      "MODE": self.op.mode,
3722 a8083063 Iustin Pop
      "NEW_SECONDARY": self.op.remote_node,
3723 a8083063 Iustin Pop
      "OLD_SECONDARY": self.instance.secondary_nodes[0],
3724 a8083063 Iustin Pop
      }
3725 396e1b78 Michael Hanselmann
    env.update(_BuildInstanceHookEnvByObject(self.instance))
3726 0834c866 Iustin Pop
    nl = [
3727 0834c866 Iustin Pop
      self.sstore.GetMasterNode(),
3728 0834c866 Iustin Pop
      self.instance.primary_node,
3729 0834c866 Iustin Pop
      ]
3730 0834c866 Iustin Pop
    if self.op.remote_node is not None:
3731 0834c866 Iustin Pop
      nl.append(self.op.remote_node)
3732 a8083063 Iustin Pop
    return env, nl, nl
3733 a8083063 Iustin Pop
3734 a8083063 Iustin Pop
  def CheckPrereq(self):
3735 a8083063 Iustin Pop
    """Check prerequisites.
3736 a8083063 Iustin Pop

3737 a8083063 Iustin Pop
    This checks that the instance is in the cluster.
3738 a8083063 Iustin Pop

3739 a8083063 Iustin Pop
    """
3740 b6e82a65 Iustin Pop
    if not hasattr(self.op, "remote_node"):
3741 b6e82a65 Iustin Pop
      self.op.remote_node = None
3742 b6e82a65 Iustin Pop
3743 a8083063 Iustin Pop
    instance = self.cfg.GetInstanceInfo(
3744 a8083063 Iustin Pop
      self.cfg.ExpandInstanceName(self.op.instance_name))
3745 a8083063 Iustin Pop
    if instance is None:
3746 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("Instance '%s' not known" %
3747 3ecf6786 Iustin Pop
                                 self.op.instance_name)
3748 a8083063 Iustin Pop
    self.instance = instance
3749 7df43a76 Iustin Pop
    self.op.instance_name = instance.name
3750 a8083063 Iustin Pop
3751 a9e0c397 Iustin Pop
    if instance.disk_template not in constants.DTS_NET_MIRROR:
3752 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("Instance's disk layout is not"
3753 a9e0c397 Iustin Pop
                                 " network mirrored.")
3754 a8083063 Iustin Pop
3755 a8083063 Iustin Pop
    if len(instance.secondary_nodes) != 1:
3756 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("The instance has a strange layout,"
3757 3ecf6786 Iustin Pop
                                 " expected one secondary but found %d" %
3758 3ecf6786 Iustin Pop
                                 len(instance.secondary_nodes))
3759 a8083063 Iustin Pop
3760 a9e0c397 Iustin Pop
    self.sec_node = instance.secondary_nodes[0]
3761 a9e0c397 Iustin Pop
3762 b6e82a65 Iustin Pop
    ia_name = getattr(self.op, "iallocator", None)
3763 b6e82a65 Iustin Pop
    if ia_name is not None:
3764 b6e82a65 Iustin Pop
      if self.op.remote_node is not None:
3765 b6e82a65 Iustin Pop
        raise errors.OpPrereqError("Give either the iallocator or the new"
3766 b6e82a65 Iustin Pop
                                   " secondary, not both")
3767 b6e82a65 Iustin Pop
      self.op.remote_node = self._RunAllocator()
3768 b6e82a65 Iustin Pop
3769 b6e82a65 Iustin Pop
    remote_node = self.op.remote_node
3770 a9e0c397 Iustin Pop
    if remote_node is not None:
3771 a8083063 Iustin Pop
      remote_node = self.cfg.ExpandNodeName(remote_node)
3772 a8083063 Iustin Pop
      if remote_node is None:
3773 3ecf6786 Iustin Pop
        raise errors.OpPrereqError("Node '%s' not known" %
3774 3ecf6786 Iustin Pop
                                   self.op.remote_node)
3775 a9e0c397 Iustin Pop
      self.remote_node_info = self.cfg.GetNodeInfo(remote_node)
3776 a9e0c397 Iustin Pop
    else:
3777 a9e0c397 Iustin Pop
      self.remote_node_info = None
3778 a8083063 Iustin Pop
    if remote_node == instance.primary_node:
3779 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("The specified node is the primary node of"
3780 3ecf6786 Iustin Pop
                                 " the instance.")
3781 a9e0c397 Iustin Pop
    elif remote_node == self.sec_node:
3782 0834c866 Iustin Pop
      if self.op.mode == constants.REPLACE_DISK_SEC:
3783 0834c866 Iustin Pop
        # this is for DRBD8, where we can't execute the same mode of
3784 0834c866 Iustin Pop
        # replacement as for drbd7 (no different port allocated)
3785 0834c866 Iustin Pop
        raise errors.OpPrereqError("Same secondary given, cannot execute"
3786 0834c866 Iustin Pop
                                   " replacement")
3787 a9e0c397 Iustin Pop
    if instance.disk_template == constants.DT_DRBD8:
3788 7df43a76 Iustin Pop
      if (self.op.mode == constants.REPLACE_DISK_ALL and
3789 7df43a76 Iustin Pop
          remote_node is not None):
3790 7df43a76 Iustin Pop
        # switch to replace secondary mode
3791 7df43a76 Iustin Pop
        self.op.mode = constants.REPLACE_DISK_SEC
3792 7df43a76 Iustin Pop
3793 a9e0c397 Iustin Pop
      if self.op.mode == constants.REPLACE_DISK_ALL:
3794 12c3449a Michael Hanselmann
        raise errors.OpPrereqError("Template 'drbd' only allows primary or"
3795 a9e0c397 Iustin Pop
                                   " secondary disk replacement, not"
3796 a9e0c397 Iustin Pop
                                   " both at once")
3797 a9e0c397 Iustin Pop
      elif self.op.mode == constants.REPLACE_DISK_PRI:
3798 a9e0c397 Iustin Pop
        if remote_node is not None:
3799 12c3449a Michael Hanselmann
          raise errors.OpPrereqError("Template 'drbd' does not allow changing"
3800 a9e0c397 Iustin Pop
                                     " the secondary while doing a primary"
3801 a9e0c397 Iustin Pop
                                     " node disk replacement")
3802 a9e0c397 Iustin Pop
        self.tgt_node = instance.primary_node
3803 cff90b79 Iustin Pop
        self.oth_node = instance.secondary_nodes[0]
3804 a9e0c397 Iustin Pop
      elif self.op.mode == constants.REPLACE_DISK_SEC:
3805 a9e0c397 Iustin Pop
        self.new_node = remote_node # this can be None, in which case
3806 a9e0c397 Iustin Pop
                                    # we don't change the secondary
3807 a9e0c397 Iustin Pop
        self.tgt_node = instance.secondary_nodes[0]
3808 cff90b79 Iustin Pop
        self.oth_node = instance.primary_node
3809 a9e0c397 Iustin Pop
      else:
3810 a9e0c397 Iustin Pop
        raise errors.ProgrammerError("Unhandled disk replace mode")
3811 a9e0c397 Iustin Pop
3812 a9e0c397 Iustin Pop
    for name in self.op.disks:
3813 a9e0c397 Iustin Pop
      if instance.FindDisk(name) is None:
3814 a9e0c397 Iustin Pop
        raise errors.OpPrereqError("Disk '%s' not found for instance '%s'" %
3815 a9e0c397 Iustin Pop
                                   (name, instance.name))
3816 a8083063 Iustin Pop
    self.op.remote_node = remote_node
3817 a8083063 Iustin Pop
3818 a9e0c397 Iustin Pop
  def _ExecD8DiskOnly(self, feedback_fn):
3819 a9e0c397 Iustin Pop
    """Replace a disk on the primary or secondary for dbrd8.
3820 a9e0c397 Iustin Pop

3821 a9e0c397 Iustin Pop
    The algorithm for replace is quite complicated:
3822 a9e0c397 Iustin Pop
      - for each disk to be replaced:
3823 a9e0c397 Iustin Pop
        - create new LVs on the target node with unique names
3824 a9e0c397 Iustin Pop
        - detach old LVs from the drbd device
3825 a9e0c397 Iustin Pop
        - rename old LVs to name_replaced.<time_t>
3826 a9e0c397 Iustin Pop
        - rename new LVs to old LVs
3827 a9e0c397 Iustin Pop
        - attach the new LVs (with the old names now) to the drbd device
3828 a9e0c397 Iustin Pop
      - wait for sync across all devices
3829 a9e0c397 Iustin Pop
      - for each modified disk:
3830 a9e0c397 Iustin Pop
        - remove old LVs (which have the name name_replaced.<time_t>)
3831 a9e0c397 Iustin Pop

3832 a9e0c397 Iustin Pop
    Failures are not very well handled.
3833 cff90b79 Iustin Pop

3834 a9e0c397 Iustin Pop
    """
3835 cff90b79 Iustin Pop
    steps_total = 6
3836 5bfac263 Iustin Pop
    warning, info = (self.proc.LogWarning, self.proc.LogInfo)
3837 a9e0c397 Iustin Pop
    instance = self.instance
3838 a9e0c397 Iustin Pop
    iv_names = {}
3839 a9e0c397 Iustin Pop
    vgname = self.cfg.GetVGName()
3840 a9e0c397 Iustin Pop
    # start of work
3841 a9e0c397 Iustin Pop
    cfg = self.cfg
3842 a9e0c397 Iustin Pop
    tgt_node = self.tgt_node
3843 cff90b79 Iustin Pop
    oth_node = self.oth_node
3844 cff90b79 Iustin Pop
3845 cff90b79 Iustin Pop
    # Step: check device activation
3846 5bfac263 Iustin Pop
    self.proc.LogStep(1, steps_total, "check device existence")
3847 cff90b79 Iustin Pop
    info("checking volume groups")
3848 cff90b79 Iustin Pop
    my_vg = cfg.GetVGName()
3849 cff90b79 Iustin Pop
    results = rpc.call_vg_list([oth_node, tgt_node])
3850 cff90b79 Iustin Pop
    if not results:
3851 cff90b79 Iustin Pop
      raise errors.OpExecError("Can't list volume groups on the nodes")
3852 cff90b79 Iustin Pop
    for node in oth_node, tgt_node:
3853 cff90b79 Iustin Pop
      res = results.get(node, False)
3854 cff90b79 Iustin Pop
      if not res or my_vg not in res:
3855 cff90b79 Iustin Pop
        raise errors.OpExecError("Volume group '%s' not found on %s" %
3856 cff90b79 Iustin Pop
                                 (my_vg, node))
3857 cff90b79 Iustin Pop
    for dev in instance.disks:
3858 cff90b79 Iustin Pop
      if not dev.iv_name in self.op.disks:
3859 cff90b79 Iustin Pop
        continue
3860 cff90b79 Iustin Pop
      for node in tgt_node, oth_node:
3861 cff90b79 Iustin Pop
        info("checking %s on %s" % (dev.iv_name, node))
3862 cff90b79 Iustin Pop
        cfg.SetDiskID(dev, node)
3863 cff90b79 Iustin Pop
        if not rpc.call_blockdev_find(node, dev):
3864 cff90b79 Iustin Pop
          raise errors.OpExecError("Can't find device %s on node %s" %
3865 cff90b79 Iustin Pop
                                   (dev.iv_name, node))
3866 cff90b79 Iustin Pop
3867 cff90b79 Iustin Pop
    # Step: check other node consistency
3868 5bfac263 Iustin Pop
    self.proc.LogStep(2, steps_total, "check peer consistency")
3869 cff90b79 Iustin Pop
    for dev in instance.disks:
3870 cff90b79 Iustin Pop
      if not dev.iv_name in self.op.disks:
3871 cff90b79 Iustin Pop
        continue
3872 cff90b79 Iustin Pop
      info("checking %s consistency on %s" % (dev.iv_name, oth_node))
3873 cff90b79 Iustin Pop
      if not _CheckDiskConsistency(self.cfg, dev, oth_node,
3874 cff90b79 Iustin Pop
                                   oth_node==instance.primary_node):
3875 cff90b79 Iustin Pop
        raise errors.OpExecError("Peer node (%s) has degraded storage, unsafe"
3876 cff90b79 Iustin Pop
                                 " to replace disks on this node (%s)" %
3877 cff90b79 Iustin Pop
                                 (oth_node, tgt_node))
3878 cff90b79 Iustin Pop
3879 cff90b79 Iustin Pop
    # Step: create new storage
3880 5bfac263 Iustin Pop
    self.proc.LogStep(3, steps_total, "allocate new storage")
3881 a9e0c397 Iustin Pop
    for dev in instance.disks:
3882 a9e0c397 Iustin Pop
      if not dev.iv_name in self.op.disks:
3883 a9e0c397 Iustin Pop
        continue
3884 a9e0c397 Iustin Pop
      size = dev.size
3885 a9e0c397 Iustin Pop
      cfg.SetDiskID(dev, tgt_node)
3886 a9e0c397 Iustin Pop
      lv_names = [".%s_%s" % (dev.iv_name, suf) for suf in ["data", "meta"]]
3887 a9e0c397 Iustin Pop
      names = _GenerateUniqueNames(cfg, lv_names)
3888 a9e0c397 Iustin Pop
      lv_data = objects.Disk(dev_type=constants.LD_LV, size=size,
3889 a9e0c397 Iustin Pop
                             logical_id=(vgname, names[0]))
3890 a9e0c397 Iustin Pop
      lv_meta = objects.Disk(dev_type=constants.LD_LV, size=128,
3891 a9e0c397 Iustin Pop
                             logical_id=(vgname, names[1]))
3892 a9e0c397 Iustin Pop
      new_lvs = [lv_data, lv_meta]
3893 a9e0c397 Iustin Pop
      old_lvs = dev.children
3894 a9e0c397 Iustin Pop
      iv_names[dev.iv_name] = (dev, old_lvs, new_lvs)
3895 cff90b79 Iustin Pop
      info("creating new local storage on %s for %s" %
3896 cff90b79 Iustin Pop
           (tgt_node, dev.iv_name))
3897 a9e0c397 Iustin Pop
      # since we *always* want to create this LV, we use the
3898 a9e0c397 Iustin Pop
      # _Create...OnPrimary (which forces the creation), even if we
3899 a9e0c397 Iustin Pop
      # are talking about the secondary node
3900 a9e0c397 Iustin Pop
      for new_lv in new_lvs:
3901 3f78eef2 Iustin Pop
        if not _CreateBlockDevOnPrimary(cfg, tgt_node, instance, new_lv,
3902 a9e0c397 Iustin Pop
                                        _GetInstanceInfoText(instance)):
3903 a9e0c397 Iustin Pop
          raise errors.OpExecError("Failed to create new LV named '%s' on"
3904 a9e0c397 Iustin Pop
                                   " node '%s'" %
3905 a9e0c397 Iustin Pop
                                   (new_lv.logical_id[1], tgt_node))
3906 a9e0c397 Iustin Pop
3907 cff90b79 Iustin Pop
    # Step: for each lv, detach+rename*2+attach
3908 5bfac263 Iustin Pop
    self.proc.LogStep(4, steps_total, "change drbd configuration")
3909 cff90b79 Iustin Pop
    for dev, old_lvs, new_lvs in iv_names.itervalues():
3910 cff90b79 Iustin Pop
      info("detaching %s drbd from local storage" % dev.iv_name)
3911 a9e0c397 Iustin Pop
      if not rpc.call_blockdev_removechildren(tgt_node, dev, old_lvs):
3912 a9e0c397 Iustin Pop
        raise errors.OpExecError("Can't detach drbd from local storage on node"
3913 a9e0c397 Iustin Pop
                                 " %s for device %s" % (tgt_node, dev.iv_name))
3914 cff90b79 Iustin Pop
      #dev.children = []
3915 cff90b79 Iustin Pop
      #cfg.Update(instance)
3916 a9e0c397 Iustin Pop
3917 a9e0c397 Iustin Pop
      # ok, we created the new LVs, so now we know we have the needed
3918 a9e0c397 Iustin Pop
      # storage; as such, we proceed on the target node to rename
3919 a9e0c397 Iustin Pop
      # old_lv to _old, and new_lv to old_lv; note that we rename LVs
3920 c99a3cc0 Manuel Franceschini
      # using the assumption that logical_id == physical_id (which in
3921 a9e0c397 Iustin Pop
      # turn is the unique_id on that node)
3922 cff90b79 Iustin Pop
3923 cff90b79 Iustin Pop
      # FIXME(iustin): use a better name for the replaced LVs
3924 a9e0c397 Iustin Pop
      temp_suffix = int(time.time())
3925 a9e0c397 Iustin Pop
      ren_fn = lambda d, suff: (d.physical_id[0],
3926 a9e0c397 Iustin Pop
                                d.physical_id[1] + "_replaced-%s" % suff)
3927 cff90b79 Iustin Pop
      # build the rename list based on what LVs exist on the node
3928 cff90b79 Iustin Pop
      rlist = []
3929 cff90b79 Iustin Pop
      for to_ren in old_lvs:
3930 cff90b79 Iustin Pop
        find_res = rpc.call_blockdev_find(tgt_node, to_ren)
3931 cff90b79 Iustin Pop
        if find_res is not None: # device exists
3932 cff90b79 Iustin Pop
          rlist.append((to_ren, ren_fn(to_ren, temp_suffix)))
3933 cff90b79 Iustin Pop
3934 cff90b79 Iustin Pop
      info("renaming the old LVs on the target node")
3935 a9e0c397 Iustin Pop
      if not rpc.call_blockdev_rename(tgt_node, rlist):
3936 cff90b79 Iustin Pop
        raise errors.OpExecError("Can't rename old LVs on node %s" % tgt_node)
3937 a9e0c397 Iustin Pop
      # now we rename the new LVs to the old LVs
3938 cff90b79 Iustin Pop
      info("renaming the new LVs on the target node")
3939 a9e0c397 Iustin Pop
      rlist = [(new, old.physical_id) for old, new in zip(old_lvs, new_lvs)]
3940 a9e0c397 Iustin Pop
      if not rpc.call_blockdev_rename(tgt_node, rlist):
3941 cff90b79 Iustin Pop
        raise errors.OpExecError("Can't rename new LVs on node %s" % tgt_node)
3942 cff90b79 Iustin Pop
3943 cff90b79 Iustin Pop
      for old, new in zip(old_lvs, new_lvs):
3944 cff90b79 Iustin Pop
        new.logical_id = old.logical_id
3945 cff90b79 Iustin Pop
        cfg.SetDiskID(new, tgt_node)
3946 a9e0c397 Iustin Pop
3947 cff90b79 Iustin Pop
      for disk in old_lvs:
3948 cff90b79 Iustin Pop
        disk.logical_id = ren_fn(disk, temp_suffix)
3949 cff90b79 Iustin Pop
        cfg.SetDiskID(disk, tgt_node)
3950 a9e0c397 Iustin Pop
3951 a9e0c397 Iustin Pop
      # now that the new lvs have the old name, we can add them to the device
3952 cff90b79 Iustin Pop
      info("adding new mirror component on %s" % tgt_node)
3953 a9e0c397 Iustin Pop
      if not rpc.call_blockdev_addchildren(tgt_node, dev, new_lvs):
3954 a9e0c397 Iustin Pop
        for new_lv in new_lvs:
3955 a9e0c397 Iustin Pop
          if not rpc.call_blockdev_remove(tgt_node, new_lv):
3956 79caa9ed Guido Trotter
            warning("Can't rollback device %s", hint="manually cleanup unused"
3957 cff90b79 Iustin Pop
                    " logical volumes")
3958 cff90b79 Iustin Pop
        raise errors.OpExecError("Can't add local storage to drbd")
3959 a9e0c397 Iustin Pop
3960 a9e0c397 Iustin Pop
      dev.children = new_lvs
3961 a9e0c397 Iustin Pop
      cfg.Update(instance)
3962 a9e0c397 Iustin Pop
3963 cff90b79 Iustin Pop
    # Step: wait for sync
3964 a9e0c397 Iustin Pop
3965 a9e0c397 Iustin Pop
    # this can fail as the old devices are degraded and _WaitForSync
3966 a9e0c397 Iustin Pop
    # does a combined result over all disks, so we don't check its
3967 a9e0c397 Iustin Pop
    # return value
3968 5bfac263 Iustin Pop
    self.proc.LogStep(5, steps_total, "sync devices")
3969 5bfac263 Iustin Pop
    _WaitForSync(cfg, instance, self.proc, unlock=True)
3970 a9e0c397 Iustin Pop
3971 a9e0c397 Iustin Pop
    # so check manually all the devices
3972 a9e0c397 Iustin Pop
    for name, (dev, old_lvs, new_lvs) in iv_names.iteritems():
3973 a9e0c397 Iustin Pop
      cfg.SetDiskID(dev, instance.primary_node)
3974 a9e0c397 Iustin Pop
      is_degr = rpc.call_blockdev_find(instance.primary_node, dev)[5]
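      # the [5] index assumes the usual result layout of call_blockdev_find,
      # where the sixth field of the status tuple is the "is degraded" flag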
3975 a9e0c397 Iustin Pop
      if is_degr:
3976 a9e0c397 Iustin Pop
        raise errors.OpExecError("DRBD device %s is degraded!" % name)
3977 a9e0c397 Iustin Pop
3978 cff90b79 Iustin Pop
    # Step: remove old storage
3979 5bfac263 Iustin Pop
    self.proc.LogStep(6, steps_total, "removing old storage")
3980 a9e0c397 Iustin Pop
    for name, (dev, old_lvs, new_lvs) in iv_names.iteritems():
3981 cff90b79 Iustin Pop
      info("remove logical volumes for %s" % name)
3982 a9e0c397 Iustin Pop
      for lv in old_lvs:
3983 a9e0c397 Iustin Pop
        cfg.SetDiskID(lv, tgt_node)
3984 a9e0c397 Iustin Pop
        if not rpc.call_blockdev_remove(tgt_node, lv):
3985 79caa9ed Guido Trotter
          warning("Can't remove old LV", hint="manually remove unused LVs")
3986 a9e0c397 Iustin Pop
          continue
3987 a9e0c397 Iustin Pop
3988 a9e0c397 Iustin Pop
  def _ExecD8Secondary(self, feedback_fn):
3989 a9e0c397 Iustin Pop
    """Replace the secondary node for drbd8.
3990 a9e0c397 Iustin Pop

3991 a9e0c397 Iustin Pop
    The algorithm for replace is quite complicated:
3992 a9e0c397 Iustin Pop
      - for all disks of the instance:
3993 a9e0c397 Iustin Pop
        - create new LVs on the new node with same names
3994 a9e0c397 Iustin Pop
        - shutdown the drbd device on the old secondary
3995 a9e0c397 Iustin Pop
        - disconnect the drbd network on the primary
3996 a9e0c397 Iustin Pop
        - create the drbd device on the new secondary
3997 a9e0c397 Iustin Pop
        - network attach the drbd on the primary, using an artifice:
3998 a9e0c397 Iustin Pop
          the drbd code for Attach() will connect to the network if it
3999 a9e0c397 Iustin Pop
          finds a device which is connected to the correct local disks but
4000 a9e0c397 Iustin Pop
          not network enabled
4001 a9e0c397 Iustin Pop
      - wait for sync across all devices
4002 a9e0c397 Iustin Pop
      - remove all disks from the old secondary
4003 a9e0c397 Iustin Pop

4004 a9e0c397 Iustin Pop
    Failures are not very well handled.
4005 0834c866 Iustin Pop

4006 a9e0c397 Iustin Pop
    """
4007 0834c866 Iustin Pop
    steps_total = 6
4008 5bfac263 Iustin Pop
    warning, info = (self.proc.LogWarning, self.proc.LogInfo)
4009 a9e0c397 Iustin Pop
    instance = self.instance
4010 a9e0c397 Iustin Pop
    iv_names = {}
4011 a9e0c397 Iustin Pop
    vgname = self.cfg.GetVGName()
4012 a9e0c397 Iustin Pop
    # start of work
4013 a9e0c397 Iustin Pop
    cfg = self.cfg
4014 a9e0c397 Iustin Pop
    old_node = self.tgt_node
4015 a9e0c397 Iustin Pop
    new_node = self.new_node
4016 a9e0c397 Iustin Pop
    pri_node = instance.primary_node
4017 0834c866 Iustin Pop
4018 0834c866 Iustin Pop
    # Step: check device activation
4019 5bfac263 Iustin Pop
    self.proc.LogStep(1, steps_total, "check device existence")
4020 0834c866 Iustin Pop
    info("checking volume groups")
4021 0834c866 Iustin Pop
    my_vg = cfg.GetVGName()
4022 0834c866 Iustin Pop
    results = rpc.call_vg_list([pri_node, new_node])
4023 0834c866 Iustin Pop
    if not results:
4024 0834c866 Iustin Pop
      raise errors.OpExecError("Can't list volume groups on the nodes")
4025 0834c866 Iustin Pop
    for node in pri_node, new_node:
4026 0834c866 Iustin Pop
      res = results.get(node, False)
4027 0834c866 Iustin Pop
      if not res or my_vg not in res:
4028 0834c866 Iustin Pop
        raise errors.OpExecError("Volume group '%s' not found on %s" %
4029 0834c866 Iustin Pop
                                 (my_vg, node))
4030 0834c866 Iustin Pop
    for dev in instance.disks:
4031 0834c866 Iustin Pop
      if not dev.iv_name in self.op.disks:
4032 0834c866 Iustin Pop
        continue
4033 0834c866 Iustin Pop
      info("checking %s on %s" % (dev.iv_name, pri_node))
4034 0834c866 Iustin Pop
      cfg.SetDiskID(dev, pri_node)
4035 0834c866 Iustin Pop
      if not rpc.call_blockdev_find(pri_node, dev):
4036 0834c866 Iustin Pop
        raise errors.OpExecError("Can't find device %s on node %s" %
4037 0834c866 Iustin Pop
                                 (dev.iv_name, pri_node))
4038 0834c866 Iustin Pop
4039 0834c866 Iustin Pop
    # Step: check other node consistency
4040 5bfac263 Iustin Pop
    self.proc.LogStep(2, steps_total, "check peer consistency")
4041 0834c866 Iustin Pop
    for dev in instance.disks:
4042 0834c866 Iustin Pop
      if not dev.iv_name in self.op.disks:
4043 0834c866 Iustin Pop
        continue
4044 0834c866 Iustin Pop
      info("checking %s consistency on %s" % (dev.iv_name, pri_node))
4045 0834c866 Iustin Pop
      if not _CheckDiskConsistency(self.cfg, dev, pri_node, True, ldisk=True):
4046 0834c866 Iustin Pop
        raise errors.OpExecError("Primary node (%s) has degraded storage,"
4047 0834c866 Iustin Pop
                                 " unsafe to replace the secondary" %
4048 0834c866 Iustin Pop
                                 pri_node)
4049 0834c866 Iustin Pop
4050 0834c866 Iustin Pop
    # Step: create new storage
4051 5bfac263 Iustin Pop
    self.proc.LogStep(3, steps_total, "allocate new storage")
4052 a9e0c397 Iustin Pop
    for dev in instance.disks:
4053 a9e0c397 Iustin Pop
      size = dev.size
4054 0834c866 Iustin Pop
      info("adding new local storage on %s for %s" % (new_node, dev.iv_name))
4055 a9e0c397 Iustin Pop
      # since we *always* want to create this LV, we use the
4056 a9e0c397 Iustin Pop
      # _Create...OnPrimary (which forces the creation), even if we
4057 a9e0c397 Iustin Pop
      # are talking about the secondary node
4058 a9e0c397 Iustin Pop
      for new_lv in dev.children:
4059 3f78eef2 Iustin Pop
        if not _CreateBlockDevOnPrimary(cfg, new_node, instance, new_lv,
4060 a9e0c397 Iustin Pop
                                        _GetInstanceInfoText(instance)):
4061 a9e0c397 Iustin Pop
          raise errors.OpExecError("Failed to create new LV named '%s' on"
4062 a9e0c397 Iustin Pop
                                   " node '%s'" %
4063 a9e0c397 Iustin Pop
                                   (new_lv.logical_id[1], new_node))
4064 a9e0c397 Iustin Pop
4065 0834c866 Iustin Pop
      iv_names[dev.iv_name] = (dev, dev.children)
4066 0834c866 Iustin Pop
4067 5bfac263 Iustin Pop
    self.proc.LogStep(4, steps_total, "changing drbd configuration")
4068 0834c866 Iustin Pop
    for dev in instance.disks:
4069 0834c866 Iustin Pop
      size = dev.size
4070 0834c866 Iustin Pop
      info("activating a new drbd on %s for %s" % (new_node, dev.iv_name))
4071 a9e0c397 Iustin Pop
      # create new devices on new_node
4072 a9e0c397 Iustin Pop
      new_drbd = objects.Disk(dev_type=constants.LD_DRBD8,
4073 a9e0c397 Iustin Pop
                              logical_id=(pri_node, new_node,
4074 a9e0c397 Iustin Pop
                                          dev.logical_id[2]),
4075 a9e0c397 Iustin Pop
                              children=dev.children)
4076 3f78eef2 Iustin Pop
      if not _CreateBlockDevOnSecondary(cfg, new_node, instance,
4077 3f78eef2 Iustin Pop
                                        new_drbd, False,
4078 a9e0c397 Iustin Pop
                                      _GetInstanceInfoText(instance)):
4079 a9e0c397 Iustin Pop
        raise errors.OpExecError("Failed to create new DRBD on"
4080 a9e0c397 Iustin Pop
                                 " node '%s'" % new_node)
4081 a9e0c397 Iustin Pop
4082 0834c866 Iustin Pop
    for dev in instance.disks:
4083 a9e0c397 Iustin Pop
      # we have new devices, shutdown the drbd on the old secondary
4084 0834c866 Iustin Pop
      info("shutting down drbd for %s on old node" % dev.iv_name)
4085 a9e0c397 Iustin Pop
      cfg.SetDiskID(dev, old_node)
4086 a9e0c397 Iustin Pop
      if not rpc.call_blockdev_shutdown(old_node, dev):
4087 0834c866 Iustin Pop
        warning("Failed to shutdown drbd for %s on old node" % dev.iv_name,
4088 79caa9ed Guido Trotter
                hint="Please cleanup this device manually as soon as possible")
4089 a9e0c397 Iustin Pop
4090 642445d9 Iustin Pop
    info("detaching primary drbds from the network (=> standalone)")
4091 642445d9 Iustin Pop
    done = 0
4092 642445d9 Iustin Pop
    for dev in instance.disks:
4093 a9e0c397 Iustin Pop
      cfg.SetDiskID(dev, pri_node)
4094 642445d9 Iustin Pop
      # set the physical (unique in bdev terms) id to None, meaning
4095 642445d9 Iustin Pop
      # detach from network
4096 642445d9 Iustin Pop
      dev.physical_id = (None,) * len(dev.physical_id)
4097 642445d9 Iustin Pop
      # and 'find' the device, which will 'fix' it to match the
4098 642445d9 Iustin Pop
      # standalone state
4099 642445d9 Iustin Pop
      if rpc.call_blockdev_find(pri_node, dev):
4100 642445d9 Iustin Pop
        done += 1
4101 642445d9 Iustin Pop
      else:
4102 642445d9 Iustin Pop
        warning("Failed to detach drbd %s from network, unusual case" %
4103 642445d9 Iustin Pop
                dev.iv_name)
4104 642445d9 Iustin Pop
4105 642445d9 Iustin Pop
    if not done:
4106 642445d9 Iustin Pop
      # no detaches succeeded (very unlikely)
4107 642445d9 Iustin Pop
      raise errors.OpExecError("Can't detach at least one DRBD from old node")
4108 642445d9 Iustin Pop
4109 642445d9 Iustin Pop
    # if we managed to detach at least one, we update all the disks of
4110 642445d9 Iustin Pop
    # the instance to point to the new secondary
4111 642445d9 Iustin Pop
    info("updating instance configuration")
4112 642445d9 Iustin Pop
    for dev in instance.disks:
4113 642445d9 Iustin Pop
      dev.logical_id = (pri_node, new_node) + dev.logical_id[2:]
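      # e.g. a logical_id such as (pri_node, old_node, port) would become
      # (pri_node, new_node, port); only the node pair is rewritten, any
      # remaining fields are preserved (values shown are illustrative)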
4114 a9e0c397 Iustin Pop
      cfg.SetDiskID(dev, pri_node)
4115 642445d9 Iustin Pop
    cfg.Update(instance)
4116 a9e0c397 Iustin Pop
4117 642445d9 Iustin Pop
    # and now perform the drbd attach
4118 642445d9 Iustin Pop
    info("attaching primary drbds to new secondary (standalone => connected)")
4119 642445d9 Iustin Pop
    failures = []
4120 642445d9 Iustin Pop
    for dev in instance.disks:
4121 642445d9 Iustin Pop
      info("attaching primary drbd for %s to new secondary node" % dev.iv_name)
4122 642445d9 Iustin Pop
      # since the attach is smart, it's enough to 'find' the device,
4123 642445d9 Iustin Pop
      # it will automatically activate the network, if the physical_id
4124 642445d9 Iustin Pop
      # is correct
4125 642445d9 Iustin Pop
      cfg.SetDiskID(dev, pri_node)
4126 642445d9 Iustin Pop
      if not rpc.call_blockdev_find(pri_node, dev):
4127 642445d9 Iustin Pop
        warning("can't attach drbd %s to new secondary!" % dev.iv_name,
4128 642445d9 Iustin Pop
                "please do a gnt-instance info to see the status of disks")
4129 a9e0c397 Iustin Pop
4130 a9e0c397 Iustin Pop
    # this can fail as the old devices are degraded and _WaitForSync
4131 a9e0c397 Iustin Pop
    # does a combined result over all disks, so we don't check its
4132 a9e0c397 Iustin Pop
    # return value
4133 5bfac263 Iustin Pop
    self.proc.LogStep(5, steps_total, "sync devices")
4134 5bfac263 Iustin Pop
    _WaitForSync(cfg, instance, self.proc, unlock=True)
4135 a9e0c397 Iustin Pop
4136 a9e0c397 Iustin Pop
    # so check manually all the devices
4137 a9e0c397 Iustin Pop
    for name, (dev, old_lvs) in iv_names.iteritems():
4138 a9e0c397 Iustin Pop
      cfg.SetDiskID(dev, pri_node)
4139 a9e0c397 Iustin Pop
      is_degr = rpc.call_blockdev_find(pri_node, dev)[5]
4140 a9e0c397 Iustin Pop
      if is_degr:
4141 a9e0c397 Iustin Pop
        raise errors.OpExecError("DRBD device %s is degraded!" % name)
4142 a9e0c397 Iustin Pop
4143 5bfac263 Iustin Pop
    self.proc.LogStep(6, steps_total, "removing old storage")
4144 a9e0c397 Iustin Pop
    for name, (dev, old_lvs) in iv_names.iteritems():
4145 0834c866 Iustin Pop
      info("remove logical volumes for %s" % name)
4146 a9e0c397 Iustin Pop
      for lv in old_lvs:
4147 a9e0c397 Iustin Pop
        cfg.SetDiskID(lv, old_node)
4148 a9e0c397 Iustin Pop
        if not rpc.call_blockdev_remove(old_node, lv):
4149 0834c866 Iustin Pop
          warning("Can't remove LV on old secondary",
4150 79caa9ed Guido Trotter
                  hint="Cleanup stale volumes by hand")
4151 a9e0c397 Iustin Pop
4152 a9e0c397 Iustin Pop
  def Exec(self, feedback_fn):
4153 a9e0c397 Iustin Pop
    """Execute disk replacement.
4154 a9e0c397 Iustin Pop

4155 a9e0c397 Iustin Pop
    This dispatches the disk replacement to the appropriate handler.
4156 a9e0c397 Iustin Pop

4157 a9e0c397 Iustin Pop
    """
4158 a9e0c397 Iustin Pop
    instance = self.instance
4159 abdf0113 Iustin Pop
    if instance.disk_template == constants.DT_DRBD8:
4160 a9e0c397 Iustin Pop
      if self.op.remote_node is None:
4161 a9e0c397 Iustin Pop
        fn = self._ExecD8DiskOnly
4162 a9e0c397 Iustin Pop
      else:
4163 a9e0c397 Iustin Pop
        fn = self._ExecD8Secondary
4164 a9e0c397 Iustin Pop
    else:
4165 a9e0c397 Iustin Pop
      raise errors.ProgrammerError("Unhandled disk replacement case")
4166 a9e0c397 Iustin Pop
    return fn(feedback_fn)
4167 a9e0c397 Iustin Pop
4168 a8083063 Iustin Pop
4169 a8083063 Iustin Pop
class LUQueryInstanceData(NoHooksLU):
4170 a8083063 Iustin Pop
  """Query runtime instance data.
4171 a8083063 Iustin Pop

4172 a8083063 Iustin Pop
  """
4173 a8083063 Iustin Pop
  _OP_REQP = ["instances"]
4174 a8083063 Iustin Pop
4175 a8083063 Iustin Pop
  def CheckPrereq(self):
4176 a8083063 Iustin Pop
    """Check prerequisites.
4177 a8083063 Iustin Pop

4178 a8083063 Iustin Pop
    This only checks the optional instance list against the existing names.
4179 a8083063 Iustin Pop

4180 a8083063 Iustin Pop
    """
4181 a8083063 Iustin Pop
    if not isinstance(self.op.instances, list):
4182 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("Invalid argument type 'instances'")
4183 a8083063 Iustin Pop
    if self.op.instances:
4184 a8083063 Iustin Pop
      self.wanted_instances = []
4185 a8083063 Iustin Pop
      names = self.op.instances
4186 a8083063 Iustin Pop
      for name in names:
4187 a8083063 Iustin Pop
        instance = self.cfg.GetInstanceInfo(self.cfg.ExpandInstanceName(name))
4188 a8083063 Iustin Pop
        if instance is None:
4189 3ecf6786 Iustin Pop
          raise errors.OpPrereqError("No such instance name '%s'" % name)
4190 515207af Guido Trotter
        self.wanted_instances.append(instance)
4191 a8083063 Iustin Pop
    else:
4192 a8083063 Iustin Pop
      self.wanted_instances = [self.cfg.GetInstanceInfo(name) for name
4193 a8083063 Iustin Pop
                               in self.cfg.GetInstanceList()]
4194 a8083063 Iustin Pop
    return
4195 a8083063 Iustin Pop
4196 a8083063 Iustin Pop
4197 a8083063 Iustin Pop
  def _ComputeDiskStatus(self, instance, snode, dev):
4198 a8083063 Iustin Pop
    """Compute block device status.
4199 a8083063 Iustin Pop

4200 a8083063 Iustin Pop
    """
4201 a8083063 Iustin Pop
    self.cfg.SetDiskID(dev, instance.primary_node)
4202 a8083063 Iustin Pop
    dev_pstatus = rpc.call_blockdev_find(instance.primary_node, dev)
4203 a1f445d3 Iustin Pop
    if dev.dev_type in constants.LDS_DRBD:
4204 a8083063 Iustin Pop
      # we change the snode then (otherwise we use the one passed in)
4205 a8083063 Iustin Pop
      if dev.logical_id[0] == instance.primary_node:
4206 a8083063 Iustin Pop
        snode = dev.logical_id[1]
4207 a8083063 Iustin Pop
      else:
4208 a8083063 Iustin Pop
        snode = dev.logical_id[0]
4209 a8083063 Iustin Pop
4210 a8083063 Iustin Pop
    if snode:
4211 a8083063 Iustin Pop
      self.cfg.SetDiskID(dev, snode)
4212 a8083063 Iustin Pop
      dev_sstatus = rpc.call_blockdev_find(snode, dev)
4213 a8083063 Iustin Pop
    else:
4214 a8083063 Iustin Pop
      dev_sstatus = None
4215 a8083063 Iustin Pop
4216 a8083063 Iustin Pop
    if dev.children:
4217 a8083063 Iustin Pop
      dev_children = [self._ComputeDiskStatus(instance, snode, child)
4218 a8083063 Iustin Pop
                      for child in dev.children]
4219 a8083063 Iustin Pop
    else:
4220 a8083063 Iustin Pop
      dev_children = []
4221 a8083063 Iustin Pop
4222 a8083063 Iustin Pop
    data = {
4223 a8083063 Iustin Pop
      "iv_name": dev.iv_name,
4224 a8083063 Iustin Pop
      "dev_type": dev.dev_type,
4225 a8083063 Iustin Pop
      "logical_id": dev.logical_id,
4226 a8083063 Iustin Pop
      "physical_id": dev.physical_id,
4227 a8083063 Iustin Pop
      "pstatus": dev_pstatus,
4228 a8083063 Iustin Pop
      "sstatus": dev_sstatus,
4229 a8083063 Iustin Pop
      "children": dev_children,
4230 a8083063 Iustin Pop
      }
4231 a8083063 Iustin Pop
4232 a8083063 Iustin Pop
    return data
4233 a8083063 Iustin Pop
4234 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
4235 a8083063 Iustin Pop
    """Gather and return data"""
4236 a8083063 Iustin Pop
    result = {}
4237 a8083063 Iustin Pop
    for instance in self.wanted_instances:
4238 a8083063 Iustin Pop
      remote_info = rpc.call_instance_info(instance.primary_node,
4239 a8083063 Iustin Pop
                                                instance.name)
4240 a8083063 Iustin Pop
      if remote_info and "state" in remote_info:
4241 a8083063 Iustin Pop
        remote_state = "up"
4242 a8083063 Iustin Pop
      else:
4243 a8083063 Iustin Pop
        remote_state = "down"
4244 a8083063 Iustin Pop
      if instance.status == "down":
4245 a8083063 Iustin Pop
        config_state = "down"
4246 a8083063 Iustin Pop
      else:
4247 a8083063 Iustin Pop
        config_state = "up"
4248 a8083063 Iustin Pop
4249 a8083063 Iustin Pop
      disks = [self._ComputeDiskStatus(instance, None, device)
4250 a8083063 Iustin Pop
               for device in instance.disks]
4251 a8083063 Iustin Pop
4252 a8083063 Iustin Pop
      idict = {
4253 a8083063 Iustin Pop
        "name": instance.name,
4254 a8083063 Iustin Pop
        "config_state": config_state,
4255 a8083063 Iustin Pop
        "run_state": remote_state,
4256 a8083063 Iustin Pop
        "pnode": instance.primary_node,
4257 a8083063 Iustin Pop
        "snodes": instance.secondary_nodes,
4258 a8083063 Iustin Pop
        "os": instance.os,
4259 a8083063 Iustin Pop
        "memory": instance.memory,
4260 a8083063 Iustin Pop
        "nics": [(nic.mac, nic.ip, nic.bridge) for nic in instance.nics],
4261 a8083063 Iustin Pop
        "disks": disks,
4262 f55ff7ec Iustin Pop
        "vcpus": instance.vcpus,
4263 a8083063 Iustin Pop
        }
4264 a8083063 Iustin Pop
4265 a8340917 Iustin Pop
      htkind = self.sstore.GetHypervisorType()
4266 a8340917 Iustin Pop
      if htkind == constants.HT_XEN_PVM30:
4267 a8340917 Iustin Pop
        idict["kernel_path"] = instance.kernel_path
4268 a8340917 Iustin Pop
        idict["initrd_path"] = instance.initrd_path
4269 a8340917 Iustin Pop
4270 a8340917 Iustin Pop
      if htkind == constants.HT_XEN_HVM31:
4271 a8340917 Iustin Pop
        idict["hvm_boot_order"] = instance.hvm_boot_order
4272 a8340917 Iustin Pop
        idict["hvm_acpi"] = instance.hvm_acpi
4273 a8340917 Iustin Pop
        idict["hvm_pae"] = instance.hvm_pae
4274 a8340917 Iustin Pop
        idict["hvm_cdrom_image_path"] = instance.hvm_cdrom_image_path
4275 a8340917 Iustin Pop
4276 a8340917 Iustin Pop
      if htkind in constants.HTS_REQ_PORT:
4277 a8340917 Iustin Pop
        idict["vnc_bind_address"] = instance.vnc_bind_address
4278 a8340917 Iustin Pop
        idict["network_port"] = instance.network_port
4279 a8340917 Iustin Pop
4280 a8083063 Iustin Pop
      result[instance.name] = idict
4281 a8083063 Iustin Pop
4282 a8083063 Iustin Pop
    return result
4283 a8083063 Iustin Pop
4284 a8083063 Iustin Pop
4285 7767bbf5 Manuel Franceschini
class LUSetInstanceParams(LogicalUnit):
4286 a8083063 Iustin Pop
  """Modifies an instances's parameters.
4287 a8083063 Iustin Pop

4288 a8083063 Iustin Pop
  """
4289 a8083063 Iustin Pop
  HPATH = "instance-modify"
4290 a8083063 Iustin Pop
  HTYPE = constants.HTYPE_INSTANCE
4291 a8083063 Iustin Pop
  _OP_REQP = ["instance_name"]
4292 a8083063 Iustin Pop
4293 a8083063 Iustin Pop
  def BuildHooksEnv(self):
4294 a8083063 Iustin Pop
    """Build hooks env.
4295 a8083063 Iustin Pop

4296 a8083063 Iustin Pop
    This runs on the master, primary and secondaries.
4297 a8083063 Iustin Pop

4298 a8083063 Iustin Pop
    """
4299 396e1b78 Michael Hanselmann
    args = dict()
4300 a8083063 Iustin Pop
    if self.mem:
4301 396e1b78 Michael Hanselmann
      args['memory'] = self.mem
4302 a8083063 Iustin Pop
    if self.vcpus:
4303 396e1b78 Michael Hanselmann
      args['vcpus'] = self.vcpus
4304 ef756965 Iustin Pop
    if self.do_ip or self.do_bridge or self.mac:
4305 396e1b78 Michael Hanselmann
      if self.do_ip:
4306 396e1b78 Michael Hanselmann
        ip = self.ip
4307 396e1b78 Michael Hanselmann
      else:
4308 396e1b78 Michael Hanselmann
        ip = self.instance.nics[0].ip
4309 396e1b78 Michael Hanselmann
      if self.bridge:
4310 396e1b78 Michael Hanselmann
        bridge = self.bridge
4311 396e1b78 Michael Hanselmann
      else:
4312 396e1b78 Michael Hanselmann
        bridge = self.instance.nics[0].bridge
4313 ef756965 Iustin Pop
      if self.mac:
4314 ef756965 Iustin Pop
        mac = self.mac
4315 ef756965 Iustin Pop
      else:
4316 ef756965 Iustin Pop
        mac = self.instance.nics[0].mac
4317 ef756965 Iustin Pop
      args['nics'] = [(ip, bridge, mac)]
4318 396e1b78 Michael Hanselmann
    env = _BuildInstanceHookEnvByObject(self.instance, override=args)
4319 880478f8 Iustin Pop
    nl = [self.sstore.GetMasterNode(),
4320 a8083063 Iustin Pop
          self.instance.primary_node] + list(self.instance.secondary_nodes)
4321 a8083063 Iustin Pop
    return env, nl, nl
4322 a8083063 Iustin Pop
4323 a8083063 Iustin Pop
  def CheckPrereq(self):
4324 a8083063 Iustin Pop
    """Check prerequisites.
4325 a8083063 Iustin Pop

4326 a8083063 Iustin Pop
    This only checks the instance list against the existing names.
4327 a8083063 Iustin Pop

4328 a8083063 Iustin Pop
    """
4329 a8083063 Iustin Pop
    self.mem = getattr(self.op, "mem", None)
4330 a8083063 Iustin Pop
    self.vcpus = getattr(self.op, "vcpus", None)
4331 a8083063 Iustin Pop
    self.ip = getattr(self.op, "ip", None)
4332 1862d460 Alexander Schreiber
    self.mac = getattr(self.op, "mac", None)
4333 a8083063 Iustin Pop
    self.bridge = getattr(self.op, "bridge", None)
4334 973d7867 Iustin Pop
    self.kernel_path = getattr(self.op, "kernel_path", None)
4335 973d7867 Iustin Pop
    self.initrd_path = getattr(self.op, "initrd_path", None)
4336 25c5878d Alexander Schreiber
    self.hvm_boot_order = getattr(self.op, "hvm_boot_order", None)
4337 31a853d2 Iustin Pop
    self.hvm_acpi = getattr(self.op, "hvm_acpi", None)
4338 31a853d2 Iustin Pop
    self.hvm_pae = getattr(self.op, "hvm_pae", None)
4339 31a853d2 Iustin Pop
    self.hvm_cdrom_image_path = getattr(self.op, "hvm_cdrom_image_path", None)
4340 31a853d2 Iustin Pop
    self.vnc_bind_address = getattr(self.op, "vnc_bind_address", None)
4341 31a853d2 Iustin Pop
    all_parms = [self.mem, self.vcpus, self.ip, self.bridge, self.mac,
4342 31a853d2 Iustin Pop
                 self.kernel_path, self.initrd_path, self.hvm_boot_order,
4343 31a853d2 Iustin Pop
                 self.hvm_acpi, self.hvm_pae, self.hvm_cdrom_image_path,
4344 31a853d2 Iustin Pop
                 self.vnc_bind_address]
4345 31a853d2 Iustin Pop
    if all_parms.count(None) == len(all_parms):
4346 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("No changes submitted")
4347 a8083063 Iustin Pop
    if self.mem is not None:
4348 a8083063 Iustin Pop
      try:
4349 a8083063 Iustin Pop
        self.mem = int(self.mem)
4350 a8083063 Iustin Pop
      except ValueError, err:
4351 3ecf6786 Iustin Pop
        raise errors.OpPrereqError("Invalid memory size: %s" % str(err))
4352 a8083063 Iustin Pop
    if self.vcpus is not None:
4353 a8083063 Iustin Pop
      try:
4354 a8083063 Iustin Pop
        self.vcpus = int(self.vcpus)
4355 a8083063 Iustin Pop
      except ValueError, err:
4356 3ecf6786 Iustin Pop
        raise errors.OpPrereqError("Invalid vcpus number: %s" % str(err))
4357 a8083063 Iustin Pop
    if self.ip is not None:
4358 a8083063 Iustin Pop
      self.do_ip = True
4359 a8083063 Iustin Pop
      if self.ip.lower() == "none":
4360 a8083063 Iustin Pop
        self.ip = None
4361 a8083063 Iustin Pop
      else:
4362 a8083063 Iustin Pop
        if not utils.IsValidIP(self.ip):
4363 3ecf6786 Iustin Pop
          raise errors.OpPrereqError("Invalid IP address '%s'." % self.ip)
4364 a8083063 Iustin Pop
    else:
4365 a8083063 Iustin Pop
      self.do_ip = False
4366 ecb215b5 Michael Hanselmann
    self.do_bridge = (self.bridge is not None)
4367 1862d460 Alexander Schreiber
    if self.mac is not None:
4368 1862d460 Alexander Schreiber
      if self.cfg.IsMacInUse(self.mac):
4369 1862d460 Alexander Schreiber
        raise errors.OpPrereqError("MAC address %s already in use in cluster" %
4370 1862d460 Alexander Schreiber
                                   self.mac)
4371 1862d460 Alexander Schreiber
      if not utils.IsValidMac(self.mac):
4372 1862d460 Alexander Schreiber
        raise errors.OpPrereqError("Invalid MAC address %s" % self.mac)
4373 a8083063 Iustin Pop
4374 973d7867 Iustin Pop
    if self.kernel_path is not None:
4375 973d7867 Iustin Pop
      self.do_kernel_path = True
4376 973d7867 Iustin Pop
      if self.kernel_path == constants.VALUE_NONE:
4377 973d7867 Iustin Pop
        raise errors.OpPrereqError("Can't set instance to no kernel")
4378 973d7867 Iustin Pop
4379 973d7867 Iustin Pop
      if self.kernel_path != constants.VALUE_DEFAULT:
4380 973d7867 Iustin Pop
        if not os.path.isabs(self.kernel_path):
4381 ba4b62cf Iustin Pop
          raise errors.OpPrereqError("The kernel path must be an absolute"
4382 973d7867 Iustin Pop
                                    " filename")
4383 8cafeb26 Iustin Pop
    else:
4384 8cafeb26 Iustin Pop
      self.do_kernel_path = False
4385 973d7867 Iustin Pop
4386 973d7867 Iustin Pop
    if self.initrd_path is not None:
4387 973d7867 Iustin Pop
      self.do_initrd_path = True
4388 973d7867 Iustin Pop
      if self.initrd_path not in (constants.VALUE_NONE,
4389 973d7867 Iustin Pop
                                  constants.VALUE_DEFAULT):
4390 2bc22872 Iustin Pop
        if not os.path.isabs(self.initrd_path):
4391 ba4b62cf Iustin Pop
          raise errors.OpPrereqError("The initrd path must be an absolute"
4392 973d7867 Iustin Pop
                                    " filename")
4393 8cafeb26 Iustin Pop
    else:
4394 8cafeb26 Iustin Pop
      self.do_initrd_path = False
4395 973d7867 Iustin Pop
4396 25c5878d Alexander Schreiber
    # boot order verification
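    # e.g. "acd" or "default" pass the check below, while something like
    # "abc" is rejected because "b" is not one of the [acdn] boot devices
    # (the sample values are illustrative only)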
4397 25c5878d Alexander Schreiber
    if self.hvm_boot_order is not None:
4398 25c5878d Alexander Schreiber
      if self.hvm_boot_order != constants.VALUE_DEFAULT:
4399 25c5878d Alexander Schreiber
        if len(self.hvm_boot_order.strip("acdn")) != 0:
4400 25c5878d Alexander Schreiber
          raise errors.OpPrereqError("invalid boot order specified,"
4401 25c5878d Alexander Schreiber
                                     " must be one or more of [acdn]"
4402 25c5878d Alexander Schreiber
                                     " or 'default'")
4403 25c5878d Alexander Schreiber
4404 31a853d2 Iustin Pop
    # hvm_cdrom_image_path verification
4405 31a853d2 Iustin Pop
    if self.op.hvm_cdrom_image_path is not None:
4406 31a853d2 Iustin Pop
      if not os.path.isabs(self.op.hvm_cdrom_image_path):
4407 31a853d2 Iustin Pop
        raise errors.OpPrereqError("The path to the HVM CDROM image must"
4408 31a853d2 Iustin Pop
                                   " be an absolute path or None, not %s" %
4409 31a853d2 Iustin Pop
                                   self.op.hvm_cdrom_image_path)
4410 31a853d2 Iustin Pop
      if not os.path.isfile(self.op.hvm_cdrom_image_path):
4411 31a853d2 Iustin Pop
        raise errors.OpPrereqError("The HVM CDROM image must either be a"
4412 31a853d2 Iustin Pop
                                   " regular file or a symlink pointing to"
4413 31a853d2 Iustin Pop
                                   " an existing regular file, not %s" %
4414 31a853d2 Iustin Pop
                                   self.op.hvm_cdrom_image_path)
4415 31a853d2 Iustin Pop
4416 31a853d2 Iustin Pop
    # vnc_bind_address verification
4417 31a853d2 Iustin Pop
    if self.op.vnc_bind_address is not None:
4418 31a853d2 Iustin Pop
      if not utils.IsValidIP(self.op.vnc_bind_address):
4419 31a853d2 Iustin Pop
        raise errors.OpPrereqError("given VNC bind address '%s' doesn't look"
4420 31a853d2 Iustin Pop
                                   " like a valid IP address" %
4421 31a853d2 Iustin Pop
                                   self.op.vnc_bind_address)
4422 31a853d2 Iustin Pop
4423 a8083063 Iustin Pop
    instance = self.cfg.GetInstanceInfo(
4424 a8083063 Iustin Pop
      self.cfg.ExpandInstanceName(self.op.instance_name))
4425 a8083063 Iustin Pop
    if instance is None:
4426 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("No such instance name '%s'" %
4427 3ecf6786 Iustin Pop
                                 self.op.instance_name)
4428 a8083063 Iustin Pop
    self.op.instance_name = instance.name
4429 a8083063 Iustin Pop
    self.instance = instance
4430 a8083063 Iustin Pop
    return
4431 a8083063 Iustin Pop
4432 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
4433 a8083063 Iustin Pop
    """Modifies an instance.
4434 a8083063 Iustin Pop

4435 a8083063 Iustin Pop
    All parameters take effect only at the next restart of the instance.
4436 a8083063 Iustin Pop
    """
4437 a8083063 Iustin Pop
    result = []
4438 a8083063 Iustin Pop
    instance = self.instance
4439 a8083063 Iustin Pop
    if self.mem:
4440 a8083063 Iustin Pop
      instance.memory = self.mem
4441 a8083063 Iustin Pop
      result.append(("mem", self.mem))
4442 a8083063 Iustin Pop
    if self.vcpus:
4443 a8083063 Iustin Pop
      instance.vcpus = self.vcpus
4444 a8083063 Iustin Pop
      result.append(("vcpus",  self.vcpus))
4445 a8083063 Iustin Pop
    if self.do_ip:
4446 a8083063 Iustin Pop
      instance.nics[0].ip = self.ip
4447 a8083063 Iustin Pop
      result.append(("ip", self.ip))
4448 a8083063 Iustin Pop
    if self.bridge:
4449 a8083063 Iustin Pop
      instance.nics[0].bridge = self.bridge
4450 a8083063 Iustin Pop
      result.append(("bridge", self.bridge))
4451 1862d460 Alexander Schreiber
    if self.mac:
4452 1862d460 Alexander Schreiber
      instance.nics[0].mac = self.mac
4453 1862d460 Alexander Schreiber
      result.append(("mac", self.mac))
4454 973d7867 Iustin Pop
    if self.do_kernel_path:
4455 973d7867 Iustin Pop
      instance.kernel_path = self.kernel_path
4456 973d7867 Iustin Pop
      result.append(("kernel_path", self.kernel_path))
4457 973d7867 Iustin Pop
    if self.do_initrd_path:
4458 973d7867 Iustin Pop
      instance.initrd_path = self.initrd_path
4459 973d7867 Iustin Pop
      result.append(("initrd_path", self.initrd_path))
4460 25c5878d Alexander Schreiber
    if self.hvm_boot_order:
4461 25c5878d Alexander Schreiber
      if self.hvm_boot_order == constants.VALUE_DEFAULT:
4462 25c5878d Alexander Schreiber
        instance.hvm_boot_order = None
4463 25c5878d Alexander Schreiber
      else:
4464 25c5878d Alexander Schreiber
        instance.hvm_boot_order = self.hvm_boot_order
4465 25c5878d Alexander Schreiber
      result.append(("hvm_boot_order", self.hvm_boot_order))
4466 31a853d2 Iustin Pop
    if self.hvm_acpi:
4467 ec1ba002 Iustin Pop
      instance.hvm_acpi = self.hvm_acpi
4468 31a853d2 Iustin Pop
      result.append(("hvm_acpi", self.hvm_acpi))
4469 31a853d2 Iustin Pop
    if self.hvm_pae:
4470 ec1ba002 Iustin Pop
      instance.hvm_pae = self.hvm_pae
4471 31a853d2 Iustin Pop
      result.append(("hvm_pae", self.hvm_pae))
4472 31a853d2 Iustin Pop
    if self.hvm_cdrom_image_path:
4473 ec1ba002 Iustin Pop
      instance.hvm_cdrom_image_path = self.hvm_cdrom_image_path
4474 31a853d2 Iustin Pop
      result.append(("hvm_cdrom_image_path", self.hvm_cdrom_image_path))
4475 31a853d2 Iustin Pop
    if self.vnc_bind_address:
4476 31a853d2 Iustin Pop
      instance.vnc_bind_address = self.vnc_bind_address
4477 31a853d2 Iustin Pop
      result.append(("vnc_bind_address", self.vnc_bind_address))
4478 a8083063 Iustin Pop
4479 a8083063 Iustin Pop
    self.cfg.AddInstance(instance)
4480 a8083063 Iustin Pop
4481 a8083063 Iustin Pop
    return result
4482 a8083063 Iustin Pop
4483 a8083063 Iustin Pop
4484 a8083063 Iustin Pop
class LUQueryExports(NoHooksLU):
4485 a8083063 Iustin Pop
  """Query the exports list
4486 a8083063 Iustin Pop

4487 a8083063 Iustin Pop
  """
4488 a8083063 Iustin Pop
  _OP_REQP = []
4489 a8083063 Iustin Pop
4490 a8083063 Iustin Pop
  def CheckPrereq(self):
4491 a8083063 Iustin Pop
    """Check that the nodelist contains only existing nodes.
4492 a8083063 Iustin Pop

4493 a8083063 Iustin Pop
    """
4494 dcb93971 Michael Hanselmann
    self.nodes = _GetWantedNodes(self, getattr(self.op, "nodes", None))
4495 a8083063 Iustin Pop
4496 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
4497 a8083063 Iustin Pop
    """Compute the list of all the exported system images.
4498 a8083063 Iustin Pop

4499 a8083063 Iustin Pop
    Returns:
4500 a8083063 Iustin Pop
      a dictionary with the structure node->(export-list)
4501 a8083063 Iustin Pop
      where export-list is a list of the instances exported on
4502 a8083063 Iustin Pop
      that node.
4503 a8083063 Iustin Pop

4504 a8083063 Iustin Pop
    """
4505 a7ba5e53 Iustin Pop
    return rpc.call_export_list(self.nodes)
4506 a8083063 Iustin Pop
4507 a8083063 Iustin Pop
4508 a8083063 Iustin Pop
class LUExportInstance(LogicalUnit):
4509 a8083063 Iustin Pop
  """Export an instance to an image in the cluster.
4510 a8083063 Iustin Pop

4511 a8083063 Iustin Pop
  """
4512 a8083063 Iustin Pop
  HPATH = "instance-export"
4513 a8083063 Iustin Pop
  HTYPE = constants.HTYPE_INSTANCE
4514 a8083063 Iustin Pop
  _OP_REQP = ["instance_name", "target_node", "shutdown"]
4515 a8083063 Iustin Pop
4516 a8083063 Iustin Pop
  def BuildHooksEnv(self):
4517 a8083063 Iustin Pop
    """Build hooks env.
4518 a8083063 Iustin Pop

4519 a8083063 Iustin Pop
    This will run on the master, primary node and target node.
4520 a8083063 Iustin Pop

4521 a8083063 Iustin Pop
    """
4522 a8083063 Iustin Pop
    env = {
4523 a8083063 Iustin Pop
      "EXPORT_NODE": self.op.target_node,
4524 a8083063 Iustin Pop
      "EXPORT_DO_SHUTDOWN": self.op.shutdown,
4525 a8083063 Iustin Pop
      }
4526 396e1b78 Michael Hanselmann
    env.update(_BuildInstanceHookEnvByObject(self.instance))
4527 880478f8 Iustin Pop
    nl = [self.sstore.GetMasterNode(), self.instance.primary_node,
4528 a8083063 Iustin Pop
          self.op.target_node]
4529 a8083063 Iustin Pop
    return env, nl, nl
4530 a8083063 Iustin Pop
4531 a8083063 Iustin Pop
  def CheckPrereq(self):
4532 a8083063 Iustin Pop
    """Check prerequisites.
4533 a8083063 Iustin Pop

4534 9ac99fda Guido Trotter
    This checks that the instance and node names are valid.
4535 a8083063 Iustin Pop

4536 a8083063 Iustin Pop
    """
4537 a8083063 Iustin Pop
    instance_name = self.cfg.ExpandInstanceName(self.op.instance_name)
4538 a8083063 Iustin Pop
    self.instance = self.cfg.GetInstanceInfo(instance_name)
4539 a8083063 Iustin Pop
    if self.instance is None:
4540 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("Instance '%s' not found" %
4541 3ecf6786 Iustin Pop
                                 self.op.instance_name)
4542 a8083063 Iustin Pop
4543 a8083063 Iustin Pop
    # node verification
4544 a8083063 Iustin Pop
    dst_node_short = self.cfg.ExpandNodeName(self.op.target_node)
4545 a8083063 Iustin Pop
    self.dst_node = self.cfg.GetNodeInfo(dst_node_short)
4546 a8083063 Iustin Pop
4547 a8083063 Iustin Pop
    if self.dst_node is None:
4548 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("Destination node '%s' is unknown." %
4549 3ecf6786 Iustin Pop
                                 self.op.target_node)
4550 a8083063 Iustin Pop
    self.op.target_node = self.dst_node.name
4551 a8083063 Iustin Pop
4552 b6023d6c Manuel Franceschini
    # instance disk type verification
4553 b6023d6c Manuel Franceschini
    for disk in self.instance.disks:
4554 b6023d6c Manuel Franceschini
      if disk.dev_type == constants.LD_FILE:
4555 b6023d6c Manuel Franceschini
        raise errors.OpPrereqError("Export not supported for instances with"
4556 b6023d6c Manuel Franceschini
                                   " file-based disks")
4557 b6023d6c Manuel Franceschini
4558 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
4559 a8083063 Iustin Pop
    """Export an instance to an image in the cluster.
4560 a8083063 Iustin Pop

4561 a8083063 Iustin Pop
    """
4562 a8083063 Iustin Pop
    instance = self.instance
4563 a8083063 Iustin Pop
    dst_node = self.dst_node
4564 a8083063 Iustin Pop
    src_node = instance.primary_node
4565 a8083063 Iustin Pop
    if self.op.shutdown:
4566 fb300fb7 Guido Trotter
      # shutdown the instance, but not the disks
4567 fb300fb7 Guido Trotter
      if not rpc.call_instance_shutdown(src_node, instance):
4568 fb300fb7 Guido Trotter
         raise errors.OpExecError("Could not shutdown instance %s on node %s" %
4569 b4de68a9 Iustin Pop
                                  (instance.name, src_node))
4570 a8083063 Iustin Pop
4571 a8083063 Iustin Pop
    vgname = self.cfg.GetVGName()
4572 a8083063 Iustin Pop
4573 a8083063 Iustin Pop
    snap_disks = []
4574 a8083063 Iustin Pop
4575 a8083063 Iustin Pop
    try:
4576 a8083063 Iustin Pop
      for disk in instance.disks:
4577 a8083063 Iustin Pop
        if disk.iv_name == "sda":
4578 a8083063 Iustin Pop
          # new_dev_name will be a snapshot of an lvm leaf of the one we passed
4579 a8083063 Iustin Pop
          new_dev_name = rpc.call_blockdev_snapshot(src_node, disk)
4580 a8083063 Iustin Pop
4581 a8083063 Iustin Pop
          if not new_dev_name:
4582 a8083063 Iustin Pop
            logger.Error("could not snapshot block device %s on node %s" %
4583 a8083063 Iustin Pop
                         (disk.logical_id[1], src_node))
4584 a8083063 Iustin Pop
          else:
4585 fe96220b Iustin Pop
            new_dev = objects.Disk(dev_type=constants.LD_LV, size=disk.size,
4586 a8083063 Iustin Pop
                                      logical_id=(vgname, new_dev_name),
4587 a8083063 Iustin Pop
                                      physical_id=(vgname, new_dev_name),
4588 a8083063 Iustin Pop
                                      iv_name=disk.iv_name)
4589 a8083063 Iustin Pop
            snap_disks.append(new_dev)
4590 a8083063 Iustin Pop
4591 a8083063 Iustin Pop
    finally:
4592 fb300fb7 Guido Trotter
      if self.op.shutdown and instance.status == "up":
4593 fb300fb7 Guido Trotter
        if not rpc.call_instance_start(src_node, instance, None):
4594 fb300fb7 Guido Trotter
          _ShutdownInstanceDisks(instance, self.cfg)
4595 fb300fb7 Guido Trotter
          raise errors.OpExecError("Could not start instance")
4596 a8083063 Iustin Pop
4597 a8083063 Iustin Pop
    # TODO: check for size
4598 a8083063 Iustin Pop
4599 a8083063 Iustin Pop
    for dev in snap_disks:
4600 16687b98 Manuel Franceschini
      if not rpc.call_snapshot_export(src_node, dev, dst_node.name, instance):
4601 16687b98 Manuel Franceschini
        logger.Error("could not export block device %s from node %s to node %s"
4602 16687b98 Manuel Franceschini
                     % (dev.logical_id[1], src_node, dst_node.name))
4603 a8083063 Iustin Pop
      if not rpc.call_blockdev_remove(src_node, dev):
4604 16687b98 Manuel Franceschini
        logger.Error("could not remove snapshot block device %s from node %s" %
4605 16687b98 Manuel Franceschini
                     (dev.logical_id[1], src_node))
4606 a8083063 Iustin Pop
4607 a8083063 Iustin Pop
    if not rpc.call_finalize_export(dst_node.name, instance, snap_disks):
4608 a8083063 Iustin Pop
      logger.Error("could not finalize export for instance %s on node %s" %
4609 a8083063 Iustin Pop
                   (instance.name, dst_node.name))
4610 a8083063 Iustin Pop
4611 a8083063 Iustin Pop
    nodelist = self.cfg.GetNodeList()
4612 a8083063 Iustin Pop
    nodelist.remove(dst_node.name)
4613 a8083063 Iustin Pop
4614 a8083063 Iustin Pop
    # on one-node clusters nodelist will be empty after the removal
4615 a8083063 Iustin Pop
    # if we proceed the backup would be removed because OpQueryExports
4616 a8083063 Iustin Pop
    # substitutes an empty list with the full cluster node list.
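    # e.g. on a one-node cluster ["node1"] becomes [] after the removal, and
    # OpQueryExports(nodes=[]) would then query every node, including the one
    # that holds the export we have just written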
4617 a8083063 Iustin Pop
    if nodelist:
4618 a8083063 Iustin Pop
      op = opcodes.OpQueryExports(nodes=nodelist)
4619 5bfac263 Iustin Pop
      exportlist = self.proc.ChainOpCode(op)
4620 a8083063 Iustin Pop
      for node in exportlist:
4621 a8083063 Iustin Pop
        if instance.name in exportlist[node]:
4622 a8083063 Iustin Pop
          if not rpc.call_export_remove(node, instance.name):
4623 a8083063 Iustin Pop
            logger.Error("could not remove older export for instance %s"
4624 a8083063 Iustin Pop
                         " on node %s" % (instance.name, node))
4625 5c947f38 Iustin Pop
4626 5c947f38 Iustin Pop
4627 9ac99fda Guido Trotter
class LURemoveExport(NoHooksLU):
4628 9ac99fda Guido Trotter
  """Remove exports related to the named instance.
4629 9ac99fda Guido Trotter

4630 9ac99fda Guido Trotter
  """
4631 9ac99fda Guido Trotter
  _OP_REQP = ["instance_name"]
4632 9ac99fda Guido Trotter
4633 9ac99fda Guido Trotter
  def CheckPrereq(self):
4634 9ac99fda Guido Trotter
    """Check prerequisites.
4635 9ac99fda Guido Trotter
    """
4636 9ac99fda Guido Trotter
    pass
4637 9ac99fda Guido Trotter
4638 9ac99fda Guido Trotter
  def Exec(self, feedback_fn):
4639 9ac99fda Guido Trotter
    """Remove any export.
4640 9ac99fda Guido Trotter

4641 9ac99fda Guido Trotter
    """
4642 9ac99fda Guido Trotter
    instance_name = self.cfg.ExpandInstanceName(self.op.instance_name)
4643 9ac99fda Guido Trotter
    # If the instance was not found we'll try with the name that was passed in.
4644 9ac99fda Guido Trotter
    # This will only work if it was an FQDN, though.
4645 9ac99fda Guido Trotter
    fqdn_warn = False
4646 9ac99fda Guido Trotter
    if not instance_name:
4647 9ac99fda Guido Trotter
      fqdn_warn = True
4648 9ac99fda Guido Trotter
      instance_name = self.op.instance_name
4649 9ac99fda Guido Trotter
4650 9ac99fda Guido Trotter
    op = opcodes.OpQueryExports(nodes=[])
4651 9ac99fda Guido Trotter
    exportlist = self.proc.ChainOpCode(op)
4652 9ac99fda Guido Trotter
    found = False
4653 9ac99fda Guido Trotter
    for node in exportlist:
4654 9ac99fda Guido Trotter
      if instance_name in exportlist[node]:
4655 9ac99fda Guido Trotter
        found = True
4656 9ac99fda Guido Trotter
        if not rpc.call_export_remove(node, instance_name):
4657 9ac99fda Guido Trotter
          logger.Error("could not remove export for instance %s"
4658 9ac99fda Guido Trotter
                       " on node %s" % (instance_name, node))
4659 9ac99fda Guido Trotter
4660 9ac99fda Guido Trotter
    if fqdn_warn and not found:
4661 9ac99fda Guido Trotter
      feedback_fn("Export not found. If trying to remove an export belonging"
4662 9ac99fda Guido Trotter
                  " to a deleted instance please use its Fully Qualified"
4663 9ac99fda Guido Trotter
                  " Domain Name.")
4664 9ac99fda Guido Trotter
4665 9ac99fda Guido Trotter
4666 5c947f38 Iustin Pop
class TagsLU(NoHooksLU):
4667 5c947f38 Iustin Pop
  """Generic tags LU.
4668 5c947f38 Iustin Pop

4669 5c947f38 Iustin Pop
  This is an abstract class which is the parent of all the other tags LUs.
4670 5c947f38 Iustin Pop

4671 5c947f38 Iustin Pop
  """
4672 5c947f38 Iustin Pop
  def CheckPrereq(self):
4673 5c947f38 Iustin Pop
    """Check prerequisites.
4674 5c947f38 Iustin Pop

4675 5c947f38 Iustin Pop
    """
4676 5c947f38 Iustin Pop
    if self.op.kind == constants.TAG_CLUSTER:
4677 5c947f38 Iustin Pop
      self.target = self.cfg.GetClusterInfo()
4678 5c947f38 Iustin Pop
    elif self.op.kind == constants.TAG_NODE:
4679 5c947f38 Iustin Pop
      name = self.cfg.ExpandNodeName(self.op.name)
4680 5c947f38 Iustin Pop
      if name is None:
4681 3ecf6786 Iustin Pop
        raise errors.OpPrereqError("Invalid node name (%s)" %
4682 3ecf6786 Iustin Pop
                                   (self.op.name,))
4683 5c947f38 Iustin Pop
      self.op.name = name
4684 5c947f38 Iustin Pop
      self.target = self.cfg.GetNodeInfo(name)
4685 5c947f38 Iustin Pop
    elif self.op.kind == constants.TAG_INSTANCE:
4686 8f684e16 Iustin Pop
      name = self.cfg.ExpandInstanceName(self.op.name)
4687 5c947f38 Iustin Pop
      if name is None:
4688 3ecf6786 Iustin Pop
        raise errors.OpPrereqError("Invalid instance name (%s)" %
4689 3ecf6786 Iustin Pop
                                   (self.op.name,))
4690 5c947f38 Iustin Pop
      self.op.name = name
4691 5c947f38 Iustin Pop
      self.target = self.cfg.GetInstanceInfo(name)
4692 5c947f38 Iustin Pop
    else:
4693 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("Wrong tag type requested (%s)" %
4694 3ecf6786 Iustin Pop
                                 str(self.op.kind))
4695 5c947f38 Iustin Pop
4696 5c947f38 Iustin Pop
4697 5c947f38 Iustin Pop
class LUGetTags(TagsLU):
4698 5c947f38 Iustin Pop
  """Returns the tags of a given object.
4699 5c947f38 Iustin Pop

4700 5c947f38 Iustin Pop
  """
4701 5c947f38 Iustin Pop
  _OP_REQP = ["kind", "name"]
4702 5c947f38 Iustin Pop
4703 5c947f38 Iustin Pop
  def Exec(self, feedback_fn):
4704 5c947f38 Iustin Pop
    """Returns the tag list.
4705 5c947f38 Iustin Pop

4706 5c947f38 Iustin Pop
    """
4707 5c947f38 Iustin Pop
    return self.target.GetTags()
4708 5c947f38 Iustin Pop
4709 5c947f38 Iustin Pop
4710 73415719 Iustin Pop
class LUSearchTags(NoHooksLU):
4711 73415719 Iustin Pop
  """Searches the tags for a given pattern.
4712 73415719 Iustin Pop

4713 73415719 Iustin Pop
  """
4714 73415719 Iustin Pop
  _OP_REQP = ["pattern"]
4715 73415719 Iustin Pop
4716 73415719 Iustin Pop
  def CheckPrereq(self):
4717 73415719 Iustin Pop
    """Check prerequisites.
4718 73415719 Iustin Pop

4719 73415719 Iustin Pop
    This checks the pattern passed for validity by compiling it.
4720 73415719 Iustin Pop

4721 73415719 Iustin Pop
    """
4722 73415719 Iustin Pop
    try:
4723 73415719 Iustin Pop
      self.re = re.compile(self.op.pattern)
4724 73415719 Iustin Pop
    except re.error, err:
4725 73415719 Iustin Pop
      raise errors.OpPrereqError("Invalid search pattern '%s': %s" %
4726 73415719 Iustin Pop
                                 (self.op.pattern, err))
4727 73415719 Iustin Pop
4728 73415719 Iustin Pop
  def Exec(self, feedback_fn):
4729 73415719 Iustin Pop
    """Returns the tag list.
4730 73415719 Iustin Pop

4731 73415719 Iustin Pop
    """
4732 73415719 Iustin Pop
    cfg = self.cfg
4733 73415719 Iustin Pop
    tgts = [("/cluster", cfg.GetClusterInfo())]
4734 73415719 Iustin Pop
    ilist = [cfg.GetInstanceInfo(name) for name in cfg.GetInstanceList()]
4735 73415719 Iustin Pop
    tgts.extend([("/instances/%s" % i.name, i) for i in ilist])
4736 73415719 Iustin Pop
    nlist = [cfg.GetNodeInfo(name) for name in cfg.GetNodeList()]
4737 73415719 Iustin Pop
    tgts.extend([("/nodes/%s" % n.name, n) for n in nlist])
4738 73415719 Iustin Pop
    results = []
4739 73415719 Iustin Pop
    for path, target in tgts:
4740 73415719 Iustin Pop
      for tag in target.GetTags():
4741 73415719 Iustin Pop
        if self.re.search(tag):
4742 73415719 Iustin Pop
          results.append((path, tag))
4743 73415719 Iustin Pop
    return results
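    # Illustrative sketch only (hypothetical names): with the pattern
    # "^web" and an instance "inst1.example.com" tagged "webserver", the
    # returned list would contain ("/instances/inst1.example.com",
    # "webserver"), i.e. (path, tag) pairs for every matching tag.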
4744 73415719 Iustin Pop
4745 73415719 Iustin Pop
4746 f27302fa Iustin Pop
class LUAddTags(TagsLU):
4747 5c947f38 Iustin Pop
  """Sets a tag on a given object.
4748 5c947f38 Iustin Pop

4749 5c947f38 Iustin Pop
  """
4750 f27302fa Iustin Pop
  _OP_REQP = ["kind", "name", "tags"]
4751 5c947f38 Iustin Pop
4752 5c947f38 Iustin Pop
  def CheckPrereq(self):
4753 5c947f38 Iustin Pop
    """Check prerequisites.
4754 5c947f38 Iustin Pop

4755 5c947f38 Iustin Pop
    This checks the type and length of the tag name and value.
4756 5c947f38 Iustin Pop

4757 5c947f38 Iustin Pop
    """
4758 5c947f38 Iustin Pop
    TagsLU.CheckPrereq(self)
4759 f27302fa Iustin Pop
    for tag in self.op.tags:
4760 f27302fa Iustin Pop
      objects.TaggableObject.ValidateTag(tag)
4761 5c947f38 Iustin Pop
4762 5c947f38 Iustin Pop
  def Exec(self, feedback_fn):
4763 5c947f38 Iustin Pop
    """Sets the tag.
4764 5c947f38 Iustin Pop

4765 5c947f38 Iustin Pop
    """
4766 5c947f38 Iustin Pop
    try:
4767 f27302fa Iustin Pop
      for tag in self.op.tags:
4768 f27302fa Iustin Pop
        self.target.AddTag(tag)
4769 5c947f38 Iustin Pop
    except errors.TagError, err:
4770 3ecf6786 Iustin Pop
      raise errors.OpExecError("Error while setting tag: %s" % str(err))
4771 5c947f38 Iustin Pop
    try:
4772 5c947f38 Iustin Pop
      self.cfg.Update(self.target)
4773 5c947f38 Iustin Pop
    except errors.ConfigurationError:
4774 3ecf6786 Iustin Pop
      raise errors.OpRetryError("There has been a modification to the"
4775 3ecf6786 Iustin Pop
                                " config file and the operation has been"
4776 3ecf6786 Iustin Pop
                                " aborted. Please retry.")
4777 5c947f38 Iustin Pop
4778 5c947f38 Iustin Pop
4779 f27302fa Iustin Pop
class LUDelTags(TagsLU):
4780 f27302fa Iustin Pop
  """Delete a list of tags from a given object.
4781 5c947f38 Iustin Pop

4782 5c947f38 Iustin Pop
  """
4783 f27302fa Iustin Pop
  _OP_REQP = ["kind", "name", "tags"]
4784 5c947f38 Iustin Pop
4785 5c947f38 Iustin Pop
  def CheckPrereq(self):
4786 5c947f38 Iustin Pop
    """Check prerequisites.
4787 5c947f38 Iustin Pop

4788 5c947f38 Iustin Pop
    This checks that we have the given tag.
4789 5c947f38 Iustin Pop

4790 5c947f38 Iustin Pop
    """
4791 5c947f38 Iustin Pop
    TagsLU.CheckPrereq(self)
4792 f27302fa Iustin Pop
    for tag in self.op.tags:
4793 f27302fa Iustin Pop
      objects.TaggableObject.ValidateTag(tag)
4794 f27302fa Iustin Pop
    del_tags = frozenset(self.op.tags)
4795 f27302fa Iustin Pop
    cur_tags = self.target.GetTags()
4796 f27302fa Iustin Pop
    if not del_tags <= cur_tags:
4797 f27302fa Iustin Pop
      diff_tags = del_tags - cur_tags
4798 f27302fa Iustin Pop
      diff_names = ["'%s'" % tag for tag in diff_tags]
4799 f27302fa Iustin Pop
      diff_names.sort()
4800 f27302fa Iustin Pop
      raise errors.OpPrereqError("Tag(s) %s not found" %
4801 f27302fa Iustin Pop
                                 (",".join(diff_names)))
4802 5c947f38 Iustin Pop
4803 5c947f38 Iustin Pop
  def Exec(self, feedback_fn):
4804 5c947f38 Iustin Pop
    """Remove the tag from the object.
4805 5c947f38 Iustin Pop

4806 5c947f38 Iustin Pop
    """
4807 f27302fa Iustin Pop
    for tag in self.op.tags:
4808 f27302fa Iustin Pop
      self.target.RemoveTag(tag)
4809 5c947f38 Iustin Pop
    try:
4810 5c947f38 Iustin Pop
      self.cfg.Update(self.target)
4811 5c947f38 Iustin Pop
    except errors.ConfigurationError:
4812 3ecf6786 Iustin Pop
      raise errors.OpRetryError("There has been a modification to the"
4813 3ecf6786 Iustin Pop
                                " config file and the operation has been"
4814 3ecf6786 Iustin Pop
                                " aborted. Please retry.")
4815 06009e27 Iustin Pop
4816 06009e27 Iustin Pop
class LUTestDelay(NoHooksLU):
4817 06009e27 Iustin Pop
  """Sleep for a specified amount of time.
4818 06009e27 Iustin Pop

4819 06009e27 Iustin Pop
  This LU sleeps on the master and/or nodes for a specified amount of
4820 06009e27 Iustin Pop
  time.
4821 06009e27 Iustin Pop

4822 06009e27 Iustin Pop
  """
4823 06009e27 Iustin Pop
  _OP_REQP = ["duration", "on_master", "on_nodes"]
4824 06009e27 Iustin Pop
4825 06009e27 Iustin Pop
  def CheckPrereq(self):
4826 06009e27 Iustin Pop
    """Check prerequisites.
4827 06009e27 Iustin Pop

4828 06009e27 Iustin Pop
    This checks that we have a good list of nodes and/or the duration
4829 06009e27 Iustin Pop
    is valid.
4830 06009e27 Iustin Pop

4831 06009e27 Iustin Pop
    """
4832 06009e27 Iustin Pop
4833 06009e27 Iustin Pop
    if self.op.on_nodes:
4834 06009e27 Iustin Pop
      self.op.on_nodes = _GetWantedNodes(self, self.op.on_nodes)
4835 06009e27 Iustin Pop
4836 06009e27 Iustin Pop
  def Exec(self, feedback_fn):
4837 06009e27 Iustin Pop
    """Do the actual sleep.
4838 06009e27 Iustin Pop

4839 06009e27 Iustin Pop
    """
4840 06009e27 Iustin Pop
    if self.op.on_master:
4841 06009e27 Iustin Pop
      if not utils.TestDelay(self.op.duration):
4842 06009e27 Iustin Pop
        raise errors.OpExecError("Error during master delay test")
4843 06009e27 Iustin Pop
    if self.op.on_nodes:
4844 06009e27 Iustin Pop
      result = rpc.call_test_delay(self.op.on_nodes, self.op.duration)
4845 06009e27 Iustin Pop
      if not result:
4846 06009e27 Iustin Pop
        raise errors.OpExecError("Complete failure from rpc call")
4847 06009e27 Iustin Pop
      for node, node_result in result.items():
4848 06009e27 Iustin Pop
        if not node_result:
4849 06009e27 Iustin Pop
          raise errors.OpExecError("Failure during rpc call to node %s,"
4850 06009e27 Iustin Pop
                                   " result: %s" % (node, node_result))
4851 d61df03e Iustin Pop
4852 d61df03e Iustin Pop
4853 d1c2dd75 Iustin Pop
class IAllocator(object):
4854 d1c2dd75 Iustin Pop
  """IAllocator framework.
4855 d61df03e Iustin Pop

4856 d1c2dd75 Iustin Pop
  An IAllocator instance has four sets of attributes:
4857 d1c2dd75 Iustin Pop
    - cfg/sstore that are needed to query the cluster
4858 d1c2dd75 Iustin Pop
    - input data (all members of the mode-specific _ALLO_KEYS or
      _RELO_KEYS class attributes are required)
4859 d1c2dd75 Iustin Pop
    - four buffer attributes (in|out_data|text), that represent the
4860 d1c2dd75 Iustin Pop
      input (to the external script) in text and data structure format,
4861 d1c2dd75 Iustin Pop
      and the output from it, again in two formats
4862 d1c2dd75 Iustin Pop
    - the result variables from the script (success, info, nodes) for
4863 d1c2dd75 Iustin Pop
      easy usage
4864 d61df03e Iustin Pop

4865 d61df03e Iustin Pop
  """
4866 29859cb7 Iustin Pop
  _ALLO_KEYS = [
4867 d1c2dd75 Iustin Pop
    "mem_size", "disks", "disk_template",
4868 d1c2dd75 Iustin Pop
    "os", "tags", "nics", "vcpus",
4869 d1c2dd75 Iustin Pop
    ]
4870 29859cb7 Iustin Pop
  _RELO_KEYS = [
4871 29859cb7 Iustin Pop
    "relocate_from",
4872 29859cb7 Iustin Pop
    ]
4873 d1c2dd75 Iustin Pop
4874 29859cb7 Iustin Pop
  def __init__(self, cfg, sstore, mode, name, **kwargs):
4875 d1c2dd75 Iustin Pop
    self.cfg = cfg
4876 d1c2dd75 Iustin Pop
    self.sstore = sstore
4877 d1c2dd75 Iustin Pop
    # init buffer variables
4878 d1c2dd75 Iustin Pop
    self.in_text = self.out_text = self.in_data = self.out_data = None
4879 d1c2dd75 Iustin Pop
    # init all input fields so that pylint is happy
4880 29859cb7 Iustin Pop
    self.mode = mode
4881 29859cb7 Iustin Pop
    self.name = name
4882 d1c2dd75 Iustin Pop
    self.mem_size = self.disks = self.disk_template = None
4883 d1c2dd75 Iustin Pop
    self.os = self.tags = self.nics = self.vcpus = None
4884 29859cb7 Iustin Pop
    self.relocate_from = None
4885 27579978 Iustin Pop
    # computed fields
4886 27579978 Iustin Pop
    self.required_nodes = None
4887 d1c2dd75 Iustin Pop
    # init result fields
4888 d1c2dd75 Iustin Pop
    self.success = self.info = self.nodes = None
4889 29859cb7 Iustin Pop
    if self.mode == constants.IALLOCATOR_MODE_ALLOC:
4890 29859cb7 Iustin Pop
      keyset = self._ALLO_KEYS
4891 29859cb7 Iustin Pop
    elif self.mode == constants.IALLOCATOR_MODE_RELOC:
4892 29859cb7 Iustin Pop
      keyset = self._RELO_KEYS
4893 29859cb7 Iustin Pop
    else:
4894 29859cb7 Iustin Pop
      raise errors.ProgrammerError("Unknown mode '%s' passed to the"
4895 29859cb7 Iustin Pop
                                   " IAllocator" % self.mode)
4896 d1c2dd75 Iustin Pop
    for key in kwargs:
4897 29859cb7 Iustin Pop
      if key not in keyset:
4898 d1c2dd75 Iustin Pop
        raise errors.ProgrammerError("Invalid input parameter '%s' to"
4899 d1c2dd75 Iustin Pop
                                     " IAllocator" % key)
4900 d1c2dd75 Iustin Pop
      setattr(self, key, kwargs[key])
4901 29859cb7 Iustin Pop
    for key in keyset:
4902 d1c2dd75 Iustin Pop
      if key not in kwargs:
4903 d1c2dd75 Iustin Pop
        raise errors.ProgrammerError("Missing input parameter '%s' to"
4904 d1c2dd75 Iustin Pop
                                     " IAllocator" % key)
4905 d1c2dd75 Iustin Pop
    self._BuildInputData()
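  # A typical construction, mirroring LUTestAllocator.Exec below; the
  # allocator script name and host names are made up for illustration:
  #
  #   ial = IAllocator(self.cfg, self.sstore,
  #                    mode=constants.IALLOCATOR_MODE_RELOC,
  #                    name="inst1.example.com",
  #                    relocate_from=["node2.example.com"])
  #   ial.Run("my-allocator")
  #   if ial.success:
  #     target_nodes = ial.nodes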
4906 d1c2dd75 Iustin Pop
4907 d1c2dd75 Iustin Pop
  def _ComputeClusterData(self):
4908 d1c2dd75 Iustin Pop
    """Compute the generic allocator input data.
4909 d1c2dd75 Iustin Pop

4910 d1c2dd75 Iustin Pop
    This is the data that is independent of the actual operation.
4911 d1c2dd75 Iustin Pop

4912 d1c2dd75 Iustin Pop
    """
4913 d1c2dd75 Iustin Pop
    cfg = self.cfg
4914 d1c2dd75 Iustin Pop
    # cluster data
4915 d1c2dd75 Iustin Pop
    data = {
4916 d1c2dd75 Iustin Pop
      "version": 1,
4917 d1c2dd75 Iustin Pop
      "cluster_name": self.sstore.GetClusterName(),
4918 d1c2dd75 Iustin Pop
      "cluster_tags": list(cfg.GetClusterInfo().GetTags()),
4919 6286519f Iustin Pop
      "hypervisor_type": self.sstore.GetHypervisorType(),
4920 d1c2dd75 Iustin Pop
      # we don't have job IDs
4921 d61df03e Iustin Pop
      }
4922 d61df03e Iustin Pop
4923 6286519f Iustin Pop
    i_list = [cfg.GetInstanceInfo(iname) for iname in cfg.GetInstanceList()]
4924 6286519f Iustin Pop
4925 d1c2dd75 Iustin Pop
    # node data
4926 d1c2dd75 Iustin Pop
    node_results = {}
4927 d1c2dd75 Iustin Pop
    node_list = cfg.GetNodeList()
4928 d1c2dd75 Iustin Pop
    node_data = rpc.call_node_info(node_list, cfg.GetVGName())
4929 d1c2dd75 Iustin Pop
    for nname in node_list:
4930 d1c2dd75 Iustin Pop
      ninfo = cfg.GetNodeInfo(nname)
4931 d1c2dd75 Iustin Pop
      if nname not in node_data or not isinstance(node_data[nname], dict):
4932 d1c2dd75 Iustin Pop
        raise errors.OpExecError("Can't get data for node %s" % nname)
4933 d1c2dd75 Iustin Pop
      remote_info = node_data[nname]
4934 b2662e7f Iustin Pop
      for attr in ['memory_total', 'memory_free', 'memory_dom0',
4935 4337cf1b Iustin Pop
                   'vg_size', 'vg_free', 'cpu_total']:
4936 d1c2dd75 Iustin Pop
        if attr not in remote_info:
4937 d1c2dd75 Iustin Pop
          raise errors.OpExecError("Node '%s' didn't return attribute '%s'" %
4938 d1c2dd75 Iustin Pop
                                   (nname, attr))
4939 d1c2dd75 Iustin Pop
        try:
4940 b2662e7f Iustin Pop
          remote_info[attr] = int(remote_info[attr])
4941 d1c2dd75 Iustin Pop
        except ValueError, err:
4942 d1c2dd75 Iustin Pop
          raise errors.OpExecError("Node '%s' returned invalid value for '%s':"
4943 d1c2dd75 Iustin Pop
                                   " %s" % (nname, attr, str(err)))
4944 6286519f Iustin Pop
      # compute memory used by primary instances
4945 6286519f Iustin Pop
      i_p_mem = i_p_up_mem = 0
4946 6286519f Iustin Pop
      for iinfo in i_list:
4947 6286519f Iustin Pop
        if iinfo.primary_node == nname:
4948 6286519f Iustin Pop
          i_p_mem += iinfo.memory
4949 6286519f Iustin Pop
          if iinfo.status == "up":
4950 6286519f Iustin Pop
            i_p_up_mem += iinfo.memory
4951 6286519f Iustin Pop
4952 b2662e7f Iustin Pop
      # build the per-node result dictionary for the allocator input
4953 d1c2dd75 Iustin Pop
      pnr = {
4954 d1c2dd75 Iustin Pop
        "tags": list(ninfo.GetTags()),
4955 b2662e7f Iustin Pop
        "total_memory": remote_info['memory_total'],
4956 b2662e7f Iustin Pop
        "reserved_memory": remote_info['memory_dom0'],
4957 b2662e7f Iustin Pop
        "free_memory": remote_info['memory_free'],
4958 6286519f Iustin Pop
        "i_pri_memory": i_p_mem,
4959 6286519f Iustin Pop
        "i_pri_up_memory": i_p_up_mem,
4960 b2662e7f Iustin Pop
        "total_disk": remote_info['vg_size'],
4961 b2662e7f Iustin Pop
        "free_disk": remote_info['vg_free'],
4962 d1c2dd75 Iustin Pop
        "primary_ip": ninfo.primary_ip,
4963 d1c2dd75 Iustin Pop
        "secondary_ip": ninfo.secondary_ip,
4964 4337cf1b Iustin Pop
        "total_cpus": remote_info['cpu_total'],
4965 d1c2dd75 Iustin Pop
        }
4966 d1c2dd75 Iustin Pop
      node_results[nname] = pnr
4967 d1c2dd75 Iustin Pop
    data["nodes"] = node_results
4968 d1c2dd75 Iustin Pop
4969 d1c2dd75 Iustin Pop
    # instance data
4970 d1c2dd75 Iustin Pop
    instance_data = {}
4971 6286519f Iustin Pop
    for iinfo in i_list:
4972 d1c2dd75 Iustin Pop
      nic_data = [{"mac": n.mac, "ip": n.ip, "bridge": n.bridge}
4973 d1c2dd75 Iustin Pop
                  for n in iinfo.nics]
4974 d1c2dd75 Iustin Pop
      pir = {
4975 d1c2dd75 Iustin Pop
        "tags": list(iinfo.GetTags()),
4976 d1c2dd75 Iustin Pop
        "should_run": iinfo.status == "up",
4977 d1c2dd75 Iustin Pop
        "vcpus": iinfo.vcpus,
4978 d1c2dd75 Iustin Pop
        "memory": iinfo.memory,
4979 d1c2dd75 Iustin Pop
        "os": iinfo.os,
4980 d1c2dd75 Iustin Pop
        "nodes": [iinfo.primary_node] + list(iinfo.secondary_nodes),
4981 d1c2dd75 Iustin Pop
        "nics": nic_data,
4982 d1c2dd75 Iustin Pop
        "disks": [{"size": dsk.size, "mode": "w"} for dsk in iinfo.disks],
4983 d1c2dd75 Iustin Pop
        "disk_template": iinfo.disk_template,
4984 d1c2dd75 Iustin Pop
        }
4985 768f0a80 Iustin Pop
      instance_data[iinfo.name] = pir
4986 d61df03e Iustin Pop
4987 d1c2dd75 Iustin Pop
    data["instances"] = instance_data
4988 d61df03e Iustin Pop
4989 d1c2dd75 Iustin Pop
    self.in_data = data
4990 d61df03e Iustin Pop
4991 d1c2dd75 Iustin Pop
  def _AddNewInstance(self):
4992 d1c2dd75 Iustin Pop
    """Add new instance data to allocator structure.
4993 d61df03e Iustin Pop

4994 d1c2dd75 Iustin Pop
    This, in combination with _ComputeClusterData, will create the
4995 d1c2dd75 Iustin Pop
    correct structure needed as input for the allocator.
4996 d61df03e Iustin Pop

4997 d1c2dd75 Iustin Pop
    The checks for the completeness of the opcode must have already been
4998 d1c2dd75 Iustin Pop
    done.
4999 d61df03e Iustin Pop

5000 d1c2dd75 Iustin Pop
    """
5001 d1c2dd75 Iustin Pop
    data = self.in_data
5002 d1c2dd75 Iustin Pop
    if len(self.disks) != 2:
5003 d1c2dd75 Iustin Pop
      raise errors.OpExecError("Only two-disk configurations supported")
5004 d1c2dd75 Iustin Pop
5005 d1c2dd75 Iustin Pop
    disk_space = _ComputeDiskSize(self.disk_template,
5006 d1c2dd75 Iustin Pop
                                  self.disks[0]["size"], self.disks[1]["size"])
5007 d1c2dd75 Iustin Pop
5008 27579978 Iustin Pop
    if self.disk_template in constants.DTS_NET_MIRROR:
5009 27579978 Iustin Pop
      self.required_nodes = 2
5010 27579978 Iustin Pop
    else:
5011 27579978 Iustin Pop
      self.required_nodes = 1
5012 d1c2dd75 Iustin Pop
    request = {
5013 d1c2dd75 Iustin Pop
      "type": "allocate",
5014 d1c2dd75 Iustin Pop
      "name": self.name,
5015 d1c2dd75 Iustin Pop
      "disk_template": self.disk_template,
5016 d1c2dd75 Iustin Pop
      "tags": self.tags,
5017 d1c2dd75 Iustin Pop
      "os": self.os,
5018 d1c2dd75 Iustin Pop
      "vcpus": self.vcpus,
5019 d1c2dd75 Iustin Pop
      "memory": self.mem_size,
5020 d1c2dd75 Iustin Pop
      "disks": self.disks,
5021 d1c2dd75 Iustin Pop
      "disk_space_total": disk_space,
5022 d1c2dd75 Iustin Pop
      "nics": self.nics,
5023 27579978 Iustin Pop
      "required_nodes": self.required_nodes,
5024 d1c2dd75 Iustin Pop
      }
5025 d1c2dd75 Iustin Pop
    data["request"] = request
5026 298fe380 Iustin Pop
5027 d1c2dd75 Iustin Pop
  def _AddRelocateInstance(self):
5028 d1c2dd75 Iustin Pop
    """Add relocate instance data to allocator structure.
5029 298fe380 Iustin Pop

5030 d1c2dd75 Iustin Pop
    This in combination with _IAllocatorGetClusterData will create the
5031 d1c2dd75 Iustin Pop
    correct structure needed as input for the allocator.
5032 d61df03e Iustin Pop

5033 d1c2dd75 Iustin Pop
    The checks for the completeness of the opcode must have already been
5034 d1c2dd75 Iustin Pop
    done.
5035 d61df03e Iustin Pop

5036 d1c2dd75 Iustin Pop
    """
5037 27579978 Iustin Pop
    instance = self.cfg.GetInstanceInfo(self.name)
5038 27579978 Iustin Pop
    if instance is None:
5039 27579978 Iustin Pop
      raise errors.ProgrammerError("Unknown instance '%s' passed to"
5040 27579978 Iustin Pop
                                   " IAllocator" % self.name)
5041 27579978 Iustin Pop
5042 27579978 Iustin Pop
    if instance.disk_template not in constants.DTS_NET_MIRROR:
5043 27579978 Iustin Pop
      raise errors.OpPrereqError("Can't relocate non-mirrored instances")
5044 27579978 Iustin Pop
5045 2a139bb0 Iustin Pop
    if len(instance.secondary_nodes) != 1:
5046 2a139bb0 Iustin Pop
      raise errors.OpPrereqError("Instance has not exactly one secondary node")
5047 2a139bb0 Iustin Pop
5048 27579978 Iustin Pop
    self.required_nodes = 1
5049 27579978 Iustin Pop
5050 27579978 Iustin Pop
    disk_space = _ComputeDiskSize(instance.disk_template,
5051 27579978 Iustin Pop
                                  instance.disks[0].size,
5052 27579978 Iustin Pop
                                  instance.disks[1].size)
5053 27579978 Iustin Pop
5054 d1c2dd75 Iustin Pop
    request = {
5055 2a139bb0 Iustin Pop
      "type": "relocate",
5056 d1c2dd75 Iustin Pop
      "name": self.name,
5057 27579978 Iustin Pop
      "disk_space_total": disk_space,
5058 27579978 Iustin Pop
      "required_nodes": self.required_nodes,
5059 29859cb7 Iustin Pop
      "relocate_from": self.relocate_from,
5060 d1c2dd75 Iustin Pop
      }
5061 27579978 Iustin Pop
    self.in_data["request"] = request
5062 d61df03e Iustin Pop
5063 d1c2dd75 Iustin Pop
  def _BuildInputData(self):
5064 d1c2dd75 Iustin Pop
    """Build input data structures.
5065 d61df03e Iustin Pop

5066 d1c2dd75 Iustin Pop
    """
5067 d1c2dd75 Iustin Pop
    self._ComputeClusterData()
5068 d61df03e Iustin Pop
5069 d1c2dd75 Iustin Pop
    if self.mode == constants.IALLOCATOR_MODE_ALLOC:
5070 d1c2dd75 Iustin Pop
      self._AddNewInstance()
5071 d1c2dd75 Iustin Pop
    else:
5072 d1c2dd75 Iustin Pop
      self._AddRelocateInstance()
5073 d61df03e Iustin Pop
5074 d1c2dd75 Iustin Pop
    self.in_text = serializer.Dump(self.in_data)
5075 d61df03e Iustin Pop
5076 8d528b7c Iustin Pop
  def Run(self, name, validate=True, call_fn=rpc.call_iallocator_runner):
5077 d1c2dd75 Iustin Pop
    """Run an instance allocator and return the results.
5078 298fe380 Iustin Pop

5079 d1c2dd75 Iustin Pop
    """
5080 d1c2dd75 Iustin Pop
    data = self.in_text
5081 298fe380 Iustin Pop
5082 8d528b7c Iustin Pop
    result = call_fn(self.sstore.GetMasterNode(), name, self.in_text)
5083 298fe380 Iustin Pop
5084 8d528b7c Iustin Pop
    if not isinstance(result, tuple) or len(result) != 4:
5085 8d528b7c Iustin Pop
      raise errors.OpExecError("Invalid result from master iallocator runner")
5086 8d528b7c Iustin Pop
5087 8d528b7c Iustin Pop
    rcode, stdout, stderr, fail = result
5088 8d528b7c Iustin Pop
5089 8d528b7c Iustin Pop
    if rcode == constants.IARUN_NOTFOUND:
5090 8d528b7c Iustin Pop
      raise errors.OpExecError("Can't find allocator '%s'" % name)
5091 8d528b7c Iustin Pop
    elif rcode == constants.IARUN_FAILURE:
5092 d1c2dd75 Iustin Pop
        raise errors.OpExecError("Instance allocator call failed: %s,"
5093 d1c2dd75 Iustin Pop
                                 " output: %s" %
5094 8d528b7c Iustin Pop
                                 (fail, stdout+stderr))
5095 8d528b7c Iustin Pop
    self.out_text = stdout
5096 d1c2dd75 Iustin Pop
    if validate:
5097 d1c2dd75 Iustin Pop
      self._ValidateResult()
5098 298fe380 Iustin Pop
5099 d1c2dd75 Iustin Pop
  def _ValidateResult(self):
5100 d1c2dd75 Iustin Pop
    """Process the allocator results.
5101 538475ca Iustin Pop

5102 d1c2dd75 Iustin Pop
    This will process and if successful save the result in
5103 d1c2dd75 Iustin Pop
    self.out_data and the other parameters.
5104 538475ca Iustin Pop

5105 d1c2dd75 Iustin Pop
    """
5106 d1c2dd75 Iustin Pop
    try:
5107 d1c2dd75 Iustin Pop
      rdict = serializer.Load(self.out_text)
5108 d1c2dd75 Iustin Pop
    except Exception, err:
5109 d1c2dd75 Iustin Pop
      raise errors.OpExecError("Can't parse iallocator results: %s" % str(err))
5110 d1c2dd75 Iustin Pop
5111 d1c2dd75 Iustin Pop
    if not isinstance(rdict, dict):
5112 d1c2dd75 Iustin Pop
      raise errors.OpExecError("Can't parse iallocator results: not a dict")
5113 538475ca Iustin Pop
5114 d1c2dd75 Iustin Pop
    for key in "success", "info", "nodes":
5115 d1c2dd75 Iustin Pop
      if key not in rdict:
5116 d1c2dd75 Iustin Pop
        raise errors.OpExecError("Can't parse iallocator results:"
5117 d1c2dd75 Iustin Pop
                                 " missing key '%s'" % key)
5118 d1c2dd75 Iustin Pop
      setattr(self, key, rdict[key])
5119 538475ca Iustin Pop
5120 d1c2dd75 Iustin Pop
    if not isinstance(rdict["nodes"], list):
5121 d1c2dd75 Iustin Pop
      raise errors.OpExecError("Can't parse iallocator results: 'nodes' key"
5122 d1c2dd75 Iustin Pop
                               " is not a list")
5123 d1c2dd75 Iustin Pop
    self.out_data = rdict
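    # A well-formed reply from the allocator script therefore looks like
    # (values illustrative):
    #   {"success": true, "info": "allocation successful",
    #    "nodes": ["node1.example.com", "node2.example.com"]}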
5124 538475ca Iustin Pop
5125 538475ca Iustin Pop
5126 d61df03e Iustin Pop
class LUTestAllocator(NoHooksLU):
5127 d61df03e Iustin Pop
  """Run allocator tests.
5128 d61df03e Iustin Pop

5129 d61df03e Iustin Pop
  This LU runs the allocator tests
5130 d61df03e Iustin Pop

5131 d61df03e Iustin Pop
  """
5132 d61df03e Iustin Pop
  _OP_REQP = ["direction", "mode", "name"]
5133 d61df03e Iustin Pop
5134 d61df03e Iustin Pop
  def CheckPrereq(self):
5135 d61df03e Iustin Pop
    """Check prerequisites.
5136 d61df03e Iustin Pop

5137 d61df03e Iustin Pop
    This checks the opcode parameters depending on the direction and mode.
5138 d61df03e Iustin Pop

5139 d61df03e Iustin Pop
    """
5140 298fe380 Iustin Pop
    if self.op.mode == constants.IALLOCATOR_MODE_ALLOC:
5141 d61df03e Iustin Pop
      for attr in ["name", "mem_size", "disks", "disk_template",
5142 d61df03e Iustin Pop
                   "os", "tags", "nics", "vcpus"]:
5143 d61df03e Iustin Pop
        if not hasattr(self.op, attr):
5144 d61df03e Iustin Pop
          raise errors.OpPrereqError("Missing attribute '%s' on opcode input" %
5145 d61df03e Iustin Pop
                                     attr)
5146 d61df03e Iustin Pop
      iname = self.cfg.ExpandInstanceName(self.op.name)
5147 d61df03e Iustin Pop
      if iname is not None:
5148 d61df03e Iustin Pop
        raise errors.OpPrereqError("Instance '%s' already in the cluster" %
5149 d61df03e Iustin Pop
                                   iname)
5150 d61df03e Iustin Pop
      if not isinstance(self.op.nics, list):
5151 d61df03e Iustin Pop
        raise errors.OpPrereqError("Invalid parameter 'nics'")
5152 d61df03e Iustin Pop
      for row in self.op.nics:
5153 d61df03e Iustin Pop
        if (not isinstance(row, dict) or
5154 d61df03e Iustin Pop
            "mac" not in row or
5155 d61df03e Iustin Pop
            "ip" not in row or
5156 d61df03e Iustin Pop
            "bridge" not in row):
5157 d61df03e Iustin Pop
          raise errors.OpPrereqError("Invalid contents of the"
5158 d61df03e Iustin Pop
                                     " 'nics' parameter")
5159 d61df03e Iustin Pop
      if not isinstance(self.op.disks, list):
5160 d61df03e Iustin Pop
        raise errors.OpPrereqError("Invalid parameter 'disks'")
5161 298fe380 Iustin Pop
      if len(self.op.disks) != 2:
5162 298fe380 Iustin Pop
        raise errors.OpPrereqError("Only two-disk configurations supported")
5163 d61df03e Iustin Pop
      for row in self.op.disks:
5164 d61df03e Iustin Pop
        if (not isinstance(row, dict) or
5165 d61df03e Iustin Pop
            "size" not in row or
5166 d61df03e Iustin Pop
            not isinstance(row["size"], int) or
5167 d61df03e Iustin Pop
            "mode" not in row or
5168 d61df03e Iustin Pop
            row["mode"] not in ['r', 'w']):
5169 d61df03e Iustin Pop
          raise errors.OpPrereqError("Invalid contents of the"
5170 d61df03e Iustin Pop
                                     " 'disks' parameter")
5171 298fe380 Iustin Pop
    elif self.op.mode == constants.IALLOCATOR_MODE_RELOC:
5172 d61df03e Iustin Pop
      if not hasattr(self.op, "name"):
5173 d61df03e Iustin Pop
        raise errors.OpPrereqError("Missing attribute 'name' on opcode input")
5174 d61df03e Iustin Pop
      fname = self.cfg.ExpandInstanceName(self.op.name)
5175 d61df03e Iustin Pop
      if fname is None:
5176 d61df03e Iustin Pop
        raise errors.OpPrereqError("Instance '%s' not found for relocation" %
5177 d61df03e Iustin Pop
                                   self.op.name)
5178 d61df03e Iustin Pop
      self.op.name = fname
5179 29859cb7 Iustin Pop
      self.relocate_from = self.cfg.GetInstanceInfo(fname).secondary_nodes
5180 d61df03e Iustin Pop
    else:
5181 d61df03e Iustin Pop
      raise errors.OpPrereqError("Invalid test allocator mode '%s'" %
5182 d61df03e Iustin Pop
                                 self.op.mode)
5183 d61df03e Iustin Pop
5184 298fe380 Iustin Pop
    if self.op.direction == constants.IALLOCATOR_DIR_OUT:
5185 298fe380 Iustin Pop
      if not hasattr(self.op, "allocator") or self.op.allocator is None:
5186 d61df03e Iustin Pop
        raise errors.OpPrereqError("Missing allocator name")
5187 298fe380 Iustin Pop
    elif self.op.direction != constants.IALLOCATOR_DIR_IN:
5188 d61df03e Iustin Pop
      raise errors.OpPrereqError("Wrong allocator test '%s'" %
5189 d61df03e Iustin Pop
                                 self.op.direction)
5190 d61df03e Iustin Pop
5191 d61df03e Iustin Pop
  def Exec(self, feedback_fn):
5192 d61df03e Iustin Pop
    """Run the allocator test.
5193 d61df03e Iustin Pop

5194 d61df03e Iustin Pop
    """
5195 29859cb7 Iustin Pop
    if self.op.mode == constants.IALLOCATOR_MODE_ALLOC:
5196 29859cb7 Iustin Pop
      ial = IAllocator(self.cfg, self.sstore,
5197 29859cb7 Iustin Pop
                       mode=self.op.mode,
5198 29859cb7 Iustin Pop
                       name=self.op.name,
5199 29859cb7 Iustin Pop
                       mem_size=self.op.mem_size,
5200 29859cb7 Iustin Pop
                       disks=self.op.disks,
5201 29859cb7 Iustin Pop
                       disk_template=self.op.disk_template,
5202 29859cb7 Iustin Pop
                       os=self.op.os,
5203 29859cb7 Iustin Pop
                       tags=self.op.tags,
5204 29859cb7 Iustin Pop
                       nics=self.op.nics,
5205 29859cb7 Iustin Pop
                       vcpus=self.op.vcpus,
5206 29859cb7 Iustin Pop
                       )
5207 29859cb7 Iustin Pop
    else:
5208 29859cb7 Iustin Pop
      ial = IAllocator(self.cfg, self.sstore,
5209 29859cb7 Iustin Pop
                       mode=self.op.mode,
5210 29859cb7 Iustin Pop
                       name=self.op.name,
5211 29859cb7 Iustin Pop
                       relocate_from=list(self.relocate_from),
5212 29859cb7 Iustin Pop
                       )
5213 d61df03e Iustin Pop
5214 298fe380 Iustin Pop
    if self.op.direction == constants.IALLOCATOR_DIR_IN:
5215 d1c2dd75 Iustin Pop
      result = ial.in_text
5216 298fe380 Iustin Pop
    else:
5217 d1c2dd75 Iustin Pop
      ial.Run(self.op.allocator, validate=False)
5218 d1c2dd75 Iustin Pop
      result = ial.out_text
5219 298fe380 Iustin Pop
    return result