root / lib / cmdlib.py @ b74159ee

#
#

# Copyright (C) 2006, 2007, 2008 Google Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA.


"""Module implementing the master-side code."""

# pylint: disable-msg=W0613,W0201

import os
import os.path
import sha
import time
import tempfile
import re
import platform

from ganeti import rpc
from ganeti import ssh
from ganeti import logger
from ganeti import utils
from ganeti import errors
from ganeti import hypervisor
from ganeti import config
from ganeti import constants
from ganeti import objects
from ganeti import opcodes
from ganeti import ssconf
from ganeti import serializer


class LogicalUnit(object):
  """Logical Unit base class.

  Subclasses must follow these rules:
    - implement CheckPrereq which also fills in the opcode instance
      with all the fields (even if only as None)
    - implement Exec
    - implement BuildHooksEnv
    - redefine HPATH and HTYPE
    - optionally redefine their run requirements (REQ_CLUSTER,
      REQ_MASTER); note that all commands require root permissions

  A minimal illustrative subclass is sketched after NoHooksLU below.

  """
  HPATH = None
  HTYPE = None
  _OP_REQP = []
  REQ_CLUSTER = True
  REQ_MASTER = True

  def __init__(self, processor, op, cfg, sstore):
    """Constructor for LogicalUnit.

    This needs to be overridden in derived classes in order to check op
    validity.

    """
    self.proc = processor
    self.op = op
    self.cfg = cfg
    self.sstore = sstore
    self.__ssh = None

    for attr_name in self._OP_REQP:
      attr_val = getattr(op, attr_name, None)
      if attr_val is None:
        raise errors.OpPrereqError("Required parameter '%s' missing" %
                                   attr_name)
    if self.REQ_CLUSTER:
      if not cfg.IsCluster():
        raise errors.OpPrereqError("Cluster not initialized yet,"
                                   " use 'gnt-cluster init' first.")
      if self.REQ_MASTER:
        master = sstore.GetMasterNode()
        if master != utils.HostInfo().name:
          raise errors.OpPrereqError("Commands must be run on the master"
                                     " node %s" % master)

  def __GetSSH(self):
    """Returns the SshRunner object.

    """
    if not self.__ssh:
      self.__ssh = ssh.SshRunner(self.sstore)
    return self.__ssh

  ssh = property(fget=__GetSSH)

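  # Note: a logical unit is driven by a processor (see ganeti.mcpu), which
  # does, roughly:
  #
  #   lu = LUSomeOperation(processor, op, cfg, sstore)
  #   lu.CheckPrereq()
  #   result = lu.Exec(feedback_fn)
  #
  # with BuildHooksEnv() consulted by the hooks runner when HPATH is set.
  # "LUSomeOperation" is a placeholder name, not a class in this module.
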
  def CheckPrereq(self):
    """Check prerequisites for this LU.

    This method should check that the prerequisites for the execution
    of this LU are fulfilled. It can do internode communication, but
    it should be idempotent - no cluster or system changes are
    allowed.

    The method should raise errors.OpPrereqError in case something is
    not fulfilled. Its return value is ignored.

    This method should also update all the parameters of the opcode to
    their canonical form; e.g. a short node name must be fully
    expanded after this method has successfully completed (so that
    hooks, logging, etc. work correctly).

    """
    raise NotImplementedError

  def Exec(self, feedback_fn):
    """Execute the LU.

    This method should implement the actual work. It should raise
    errors.OpExecError for failures that are somewhat dealt with in
    code, or expected.

    """
    raise NotImplementedError

  def BuildHooksEnv(self):
    """Build hooks environment for this LU.

    This method should return a three-element tuple consisting of: a dict
    containing the environment that will be used for running the
    specific hook for this LU, a list of node names on which the hook
    should run before the execution, and a list of node names on which
    the hook should run after the execution.

    The keys of the dict must not be prefixed with 'GANETI_', as this
    will be handled in the hooks runner. Also note that additional keys
    will be added by the hooks runner. If the LU doesn't define any
    environment, an empty dict (and not None) should be returned.

    As for the node lists, the master should not be included in
    them, as it will be added by the hooks runner in case this LU
    requires a cluster to run on (otherwise we don't have a node
    list). If there are no nodes, an empty list (and not None)
    should be returned.

    Note that if the HPATH for a LU class is None, this function will
    not be called.

    """
    raise NotImplementedError


class NoHooksLU(LogicalUnit):
  """Simple LU which runs no hooks.

  This LU is intended as a parent for other LogicalUnits which will
  run no hooks, in order to reduce duplicate code.

  """
  HPATH = None
  HTYPE = None

  def BuildHooksEnv(self):
    """Build hooks env.

    This is a no-op, since we don't run hooks.

    """
    return {}, [], []


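# Illustrative sketch of a minimal LU following the rules in LogicalUnit's
# docstring; the class and its "message" opcode field are hypothetical and
# exist only as an example:
#
#   class LUExampleEcho(NoHooksLU):
#     """Echo a message back to the caller."""
#     _OP_REQP = ["message"]
#
#     def CheckPrereq(self):
#       if not isinstance(self.op.message, basestring):
#         raise errors.OpPrereqError("Invalid argument type 'message'")
#
#     def Exec(self, feedback_fn):
#       feedback_fn(self.op.message)
#       return self.op.message
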
def _AddHostToEtcHosts(hostname):
  """Wrapper around utils.SetEtcHostsEntry.

  """
  hi = utils.HostInfo(name=hostname)
  utils.SetEtcHostsEntry(constants.ETC_HOSTS, hi.ip, hi.name, [hi.ShortName()])


def _RemoveHostFromEtcHosts(hostname):
  """Wrapper around utils.RemoveEtcHostsEntry.

  """
  hi = utils.HostInfo(name=hostname)
  utils.RemoveEtcHostsEntry(constants.ETC_HOSTS, hi.name)
  utils.RemoveEtcHostsEntry(constants.ETC_HOSTS, hi.ShortName())


def _GetWantedNodes(lu, nodes):
  """Returns list of checked and expanded node names.

  Args:
    nodes: List of nodes (strings) or None for all

  """
  if not isinstance(nodes, list):
    raise errors.OpPrereqError("Invalid argument type 'nodes'")

  if nodes:
    wanted = []

    for name in nodes:
      node = lu.cfg.ExpandNodeName(name)
      if node is None:
        raise errors.OpPrereqError("No such node name '%s'" % name)
      wanted.append(node)

  else:
    wanted = lu.cfg.GetNodeList()
  return utils.NiceSort(wanted)


def _GetWantedInstances(lu, instances):
  """Returns list of checked and expanded instance names.

  Args:
    instances: List of instances (strings) or None for all

  """
  if not isinstance(instances, list):
    raise errors.OpPrereqError("Invalid argument type 'instances'")

  if instances:
    wanted = []

    for name in instances:
      instance = lu.cfg.ExpandInstanceName(name)
      if instance is None:
        raise errors.OpPrereqError("No such instance name '%s'" % name)
      wanted.append(instance)

  else:
    wanted = lu.cfg.GetInstanceList()
  return utils.NiceSort(wanted)


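# Usage sketch (hypothetical attribute names): inside an LU's CheckPrereq,
# these helpers canonicalize user-supplied names, e.g.
#
#   self.wanted_nodes = _GetWantedNodes(self, self.op.nodes)
#   self.wanted_instances = _GetWantedInstances(self, self.op.instances)
#
# where an empty list selects all nodes/instances.
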
def _CheckOutputFields(static, dynamic, selected):
  """Checks whether all selected fields are valid.

  Args:
    static: Static fields
    dynamic: Dynamic fields
    selected: Fields selected by the user, to be validated

  """
  static_fields = frozenset(static)
  dynamic_fields = frozenset(dynamic)

  all_fields = static_fields | dynamic_fields

  if not all_fields.issuperset(selected):
    raise errors.OpPrereqError("Unknown output fields selected: %s"
                               % ",".join(frozenset(selected).
                                          difference(all_fields)))


def _BuildInstanceHookEnv(name, primary_node, secondary_nodes, os_type, status,
265 396e1b78 Michael Hanselmann
                          memory, vcpus, nics):
266 ecb215b5 Michael Hanselmann
  """Builds instance related env variables for hooks from single variables.
267 ecb215b5 Michael Hanselmann

268 ecb215b5 Michael Hanselmann
  Args:
269 ecb215b5 Michael Hanselmann
    secondary_nodes: List of secondary nodes as strings
270 396e1b78 Michael Hanselmann
  """
271 396e1b78 Michael Hanselmann
  env = {
272 0e137c28 Iustin Pop
    "OP_TARGET": name,
273 396e1b78 Michael Hanselmann
    "INSTANCE_NAME": name,
274 396e1b78 Michael Hanselmann
    "INSTANCE_PRIMARY": primary_node,
275 396e1b78 Michael Hanselmann
    "INSTANCE_SECONDARIES": " ".join(secondary_nodes),
276 ecb215b5 Michael Hanselmann
    "INSTANCE_OS_TYPE": os_type,
277 396e1b78 Michael Hanselmann
    "INSTANCE_STATUS": status,
278 396e1b78 Michael Hanselmann
    "INSTANCE_MEMORY": memory,
279 396e1b78 Michael Hanselmann
    "INSTANCE_VCPUS": vcpus,
280 396e1b78 Michael Hanselmann
  }
281 396e1b78 Michael Hanselmann
282 396e1b78 Michael Hanselmann
  if nics:
283 396e1b78 Michael Hanselmann
    nic_count = len(nics)
284 53e4e875 Guido Trotter
    for idx, (ip, bridge, mac) in enumerate(nics):
285 396e1b78 Michael Hanselmann
      if ip is None:
286 396e1b78 Michael Hanselmann
        ip = ""
287 396e1b78 Michael Hanselmann
      env["INSTANCE_NIC%d_IP" % idx] = ip
288 396e1b78 Michael Hanselmann
      env["INSTANCE_NIC%d_BRIDGE" % idx] = bridge
289 53e4e875 Guido Trotter
      env["INSTANCE_NIC%d_HWADDR" % idx] = mac
290 396e1b78 Michael Hanselmann
  else:
291 396e1b78 Michael Hanselmann
    nic_count = 0
292 396e1b78 Michael Hanselmann
293 396e1b78 Michael Hanselmann
  env["INSTANCE_NIC_COUNT"] = nic_count
294 396e1b78 Michael Hanselmann
295 396e1b78 Michael Hanselmann
  return env
296 396e1b78 Michael Hanselmann
297 396e1b78 Michael Hanselmann
298 396e1b78 Michael Hanselmann
def _BuildInstanceHookEnvByObject(instance, override=None):
299 ecb215b5 Michael Hanselmann
  """Builds instance related env variables for hooks from an object.
300 ecb215b5 Michael Hanselmann

301 ecb215b5 Michael Hanselmann
  Args:
302 ecb215b5 Michael Hanselmann
    instance: objects.Instance object of instance
303 ecb215b5 Michael Hanselmann
    override: dict of values to override
304 ecb215b5 Michael Hanselmann
  """
305 396e1b78 Michael Hanselmann
  args = {
306 396e1b78 Michael Hanselmann
    'name': instance.name,
307 396e1b78 Michael Hanselmann
    'primary_node': instance.primary_node,
308 396e1b78 Michael Hanselmann
    'secondary_nodes': instance.secondary_nodes,
309 ecb215b5 Michael Hanselmann
    'os_type': instance.os,
310 396e1b78 Michael Hanselmann
    'status': instance.os,
311 396e1b78 Michael Hanselmann
    'memory': instance.memory,
312 396e1b78 Michael Hanselmann
    'vcpus': instance.vcpus,
313 53e4e875 Guido Trotter
    'nics': [(nic.ip, nic.bridge, nic.mac) for nic in instance.nics],
314 396e1b78 Michael Hanselmann
  }
315 396e1b78 Michael Hanselmann
  if override:
316 396e1b78 Michael Hanselmann
    args.update(override)
317 396e1b78 Michael Hanselmann
  return _BuildInstanceHookEnv(**args)
318 396e1b78 Michael Hanselmann
319 396e1b78 Michael Hanselmann
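# Example of the resulting environment (hypothetical one-NIC instance): the
# dict contains OP_TARGET, INSTANCE_NAME, INSTANCE_PRIMARY,
# INSTANCE_SECONDARIES, INSTANCE_OS_TYPE, INSTANCE_STATUS, INSTANCE_MEMORY,
# INSTANCE_VCPUS, INSTANCE_NIC_COUNT=1 and INSTANCE_NIC0_IP/_BRIDGE/_HWADDR;
# per LogicalUnit.BuildHooksEnv above, the 'GANETI_' prefix is added later
# by the hooks runner, not here.
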
def _HasValidVG(vglist, vgname):
  """Checks if the volume group list is valid.

  A non-None return value means there's an error, and the return value
  is the error message.

  """
  vgsize = vglist.get(vgname, None)
  if vgsize is None:
    return "volume group '%s' missing" % vgname
  elif vgsize < 20480:
    return ("volume group '%s' too small (20480MiB required, %dMiB found)" %
            (vgname, vgsize))
  return None


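# Callers treat the return value of _HasValidVG as "None means OK, any
# string is an error message"; see LUInitCluster.CheckPrereq and
# LUVerifyCluster._VerifyNode below.
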
def _InitSSHSetup(node):
  """Setup the SSH configuration for the cluster.

  This generates a dsa keypair for root, adds the pub key to the
  permitted hosts and adds the hostkey to its own known hosts.

  Args:
    node: the name of this host as an FQDN

  """
  priv_key, pub_key, auth_keys = ssh.GetUserFiles(constants.GANETI_RUNAS)

  for name in priv_key, pub_key:
    if os.path.exists(name):
      utils.CreateBackup(name)
    utils.RemoveFile(name)

  result = utils.RunCmd(["ssh-keygen", "-t", "dsa",
                         "-f", priv_key,
                         "-q", "-N", ""])
  if result.failed:
    raise errors.OpExecError("Could not generate ssh keypair, error %s" %
                             result.output)

  f = open(pub_key, 'r')
  try:
    utils.AddAuthorizedKey(auth_keys, f.read(8192))
  finally:
    f.close()


def _InitGanetiServerSetup(ss):
  """Setup the necessary configuration for the initial node daemon.

  This creates the nodepass file containing the shared password for
  the cluster and also generates the SSL certificate.

  """
  # Create pseudo random password
  randpass = sha.new(os.urandom(64)).hexdigest()
  # and write it into sstore
  ss.SetKey(ss.SS_NODED_PASS, randpass)

  result = utils.RunCmd(["openssl", "req", "-new", "-newkey", "rsa:1024",
                         "-days", str(365*5), "-nodes", "-x509",
                         "-keyout", constants.SSL_CERT_FILE,
                         "-out", constants.SSL_CERT_FILE, "-batch"])
  if result.failed:
    raise errors.OpExecError("could not generate server ssl cert, command"
                             " %s had exitcode %s and error message %s" %
                             (result.cmd, result.exit_code, result.output))

  os.chmod(constants.SSL_CERT_FILE, 0400)

  result = utils.RunCmd([constants.NODE_INITD_SCRIPT, "restart"])

  if result.failed:
    raise errors.OpExecError("Could not start the node daemon, command %s"
                             " had exitcode %s and error %s" %
                             (result.cmd, result.exit_code, result.output))


def _CheckInstanceBridgesExist(instance):
  """Check that the bridges needed by an instance exist.

  """
  # check bridges existence
  brlist = [nic.bridge for nic in instance.nics]
  if not rpc.call_bridges_exist(instance.primary_node, brlist):
    raise errors.OpPrereqError("one or more target bridges %s do not"
                               " exist on destination node '%s'" %
                               (brlist, instance.primary_node))


class LUInitCluster(LogicalUnit):
  """Initialise the cluster.

  """
  HPATH = "cluster-init"
  HTYPE = constants.HTYPE_CLUSTER
  _OP_REQP = ["cluster_name", "hypervisor_type", "mac_prefix",
              "def_bridge", "master_netdev", "file_storage_dir"]
  REQ_CLUSTER = False

  def BuildHooksEnv(self):
    """Build hooks env.

    Notes: Since we don't require a cluster, we must manually add
    ourselves to the post-run node list.

    """
    env = {"OP_TARGET": self.op.cluster_name}
    return env, [], [self.hostname.name]

  def CheckPrereq(self):
    """Verify that the passed name and the init parameters are valid.

    """
    if config.ConfigWriter.IsCluster():
      raise errors.OpPrereqError("Cluster is already initialised")

    if self.op.hypervisor_type == constants.HT_XEN_HVM31:
      if not os.path.exists(constants.VNC_PASSWORD_FILE):
        raise errors.OpPrereqError("Please prepare the cluster VNC"
                                   " password file %s" %
                                   constants.VNC_PASSWORD_FILE)

    self.hostname = hostname = utils.HostInfo()

    if hostname.ip.startswith("127."):
      raise errors.OpPrereqError("This host's IP resolves to the private"
                                 " range (%s). Please fix DNS or %s." %
                                 (hostname.ip, constants.ETC_HOSTS))

    if not utils.TcpPing(hostname.ip, constants.DEFAULT_NODED_PORT,
                         source=constants.LOCALHOST_IP_ADDRESS):
      raise errors.OpPrereqError("Inconsistency: this host's name resolves"
                                 " to %s,\nbut this ip address does not"
                                 " belong to this host."
                                 " Aborting." % hostname.ip)

    self.clustername = clustername = utils.HostInfo(self.op.cluster_name)

    if utils.TcpPing(clustername.ip, constants.DEFAULT_NODED_PORT,
                     timeout=5):
      raise errors.OpPrereqError("Cluster IP already active. Aborting.")

    secondary_ip = getattr(self.op, "secondary_ip", None)
    if secondary_ip and not utils.IsValidIP(secondary_ip):
      raise errors.OpPrereqError("Invalid secondary ip given")
    if (secondary_ip and
        secondary_ip != hostname.ip and
        (not utils.TcpPing(secondary_ip, constants.DEFAULT_NODED_PORT,
                           source=constants.LOCALHOST_IP_ADDRESS))):
      raise errors.OpPrereqError("You gave %s as secondary IP,"
                                 " but it does not belong to this host." %
                                 secondary_ip)
    self.secondary_ip = secondary_ip

    if not hasattr(self.op, "vg_name"):
      self.op.vg_name = None
    # if vg_name not None, checks if volume group is valid
    if self.op.vg_name:
      vgstatus = _HasValidVG(utils.ListVolumeGroups(), self.op.vg_name)
      if vgstatus:
        raise errors.OpPrereqError("Error: %s\nspecify --no-lvm-storage if"
                                   " you are not using lvm" % vgstatus)

    self.op.file_storage_dir = os.path.normpath(self.op.file_storage_dir)

    if not os.path.isabs(self.op.file_storage_dir):
      raise errors.OpPrereqError("The file storage directory you have is"
                                 " not an absolute path.")

    if not os.path.exists(self.op.file_storage_dir):
      try:
        os.makedirs(self.op.file_storage_dir, 0750)
      except OSError, err:
        raise errors.OpPrereqError("Cannot create file storage directory"
                                   " '%s': %s" %
                                   (self.op.file_storage_dir, err))

    if not os.path.isdir(self.op.file_storage_dir):
      raise errors.OpPrereqError("The file storage directory '%s' is not"
                                 " a directory." % self.op.file_storage_dir)

    if not re.match("^[0-9a-z]{2}:[0-9a-z]{2}:[0-9a-z]{2}$",
                    self.op.mac_prefix):
      raise errors.OpPrereqError("Invalid mac prefix given '%s'" %
                                 self.op.mac_prefix)

    if self.op.hypervisor_type not in constants.HYPER_TYPES:
      raise errors.OpPrereqError("Invalid hypervisor type given '%s'" %
                                 self.op.hypervisor_type)

    result = utils.RunCmd(["ip", "link", "show", "dev", self.op.master_netdev])
    if result.failed:
      raise errors.OpPrereqError("Invalid master netdev given (%s): '%s'" %
                                 (self.op.master_netdev,
                                  result.output.strip()))

    if not (os.path.isfile(constants.NODE_INITD_SCRIPT) and
            os.access(constants.NODE_INITD_SCRIPT, os.X_OK)):
      raise errors.OpPrereqError("Init.d script '%s' missing or not"
                                 " executable." % constants.NODE_INITD_SCRIPT)

  def Exec(self, feedback_fn):
    """Initialize the cluster.

    """
    clustername = self.clustername
    hostname = self.hostname

    # set up the simple store
    self.sstore = ss = ssconf.SimpleStore()
    ss.SetKey(ss.SS_HYPERVISOR, self.op.hypervisor_type)
    ss.SetKey(ss.SS_MASTER_NODE, hostname.name)
    ss.SetKey(ss.SS_MASTER_IP, clustername.ip)
    ss.SetKey(ss.SS_MASTER_NETDEV, self.op.master_netdev)
    ss.SetKey(ss.SS_CLUSTER_NAME, clustername.name)
    ss.SetKey(ss.SS_FILE_STORAGE_DIR, self.op.file_storage_dir)

    # set up the inter-node password and certificate
    _InitGanetiServerSetup(ss)

    # start the master ip
    rpc.call_node_start_master(hostname.name)

    # set up ssh config and /etc/hosts
    f = open(constants.SSH_HOST_RSA_PUB, 'r')
    try:
      sshline = f.read()
    finally:
      f.close()
    sshkey = sshline.split(" ")[1]

    _AddHostToEtcHosts(hostname.name)
    _InitSSHSetup(hostname.name)

    # init of cluster config file
    self.cfg = cfgw = config.ConfigWriter()
    cfgw.InitConfig(hostname.name, hostname.ip, self.secondary_ip,
                    sshkey, self.op.mac_prefix,
                    self.op.vg_name, self.op.def_bridge)

    ssh.WriteKnownHostsFile(cfgw, ss, constants.SSH_KNOWN_HOSTS_FILE)


class LUDestroyCluster(NoHooksLU):
  """Logical unit for destroying the cluster.

  """
  _OP_REQP = []

  def CheckPrereq(self):
    """Check prerequisites.

    This checks whether the cluster is empty.

    Any errors are signalled by raising errors.OpPrereqError.

    """
    master = self.sstore.GetMasterNode()

    nodelist = self.cfg.GetNodeList()
    if len(nodelist) != 1 or nodelist[0] != master:
      raise errors.OpPrereqError("There are still %d node(s) in"
                                 " this cluster." % (len(nodelist) - 1))
    instancelist = self.cfg.GetInstanceList()
    if instancelist:
      raise errors.OpPrereqError("There are still %d instance(s) in"
                                 " this cluster." % len(instancelist))

  def Exec(self, feedback_fn):
    """Destroys the cluster.

    """
    master = self.sstore.GetMasterNode()
    if not rpc.call_node_stop_master(master):
      raise errors.OpExecError("Could not disable the master role")
    priv_key, pub_key, _ = ssh.GetUserFiles(constants.GANETI_RUNAS)
    utils.CreateBackup(priv_key)
    utils.CreateBackup(pub_key)
    rpc.call_node_leave_cluster(master)


class LUVerifyCluster(NoHooksLU):
  """Verifies the cluster status.

  """
  _OP_REQP = ["skip_checks"]

  def _VerifyNode(self, node, file_list, local_cksum, vglist, node_result,
                  remote_version, feedback_fn):
    """Run multiple tests against a node.

    Test list:
      - compares ganeti version
      - checks vg existence and size > 20G
      - checks config file checksum
      - checks ssh to other nodes

    Args:
      node: name of the node to check
      file_list: required list of files
      local_cksum: dictionary of local files and their checksums

    """
    # compares ganeti version
    local_version = constants.PROTOCOL_VERSION
    if not remote_version:
      feedback_fn("  - ERROR: connection to %s failed" % (node))
      return True

    if local_version != remote_version:
      feedback_fn("  - ERROR: sw version mismatch: master %s, node(%s) %s" %
                      (local_version, node, remote_version))
      return True

    # checks vg existence and size > 20G

    bad = False
    if not vglist:
      feedback_fn("  - ERROR: unable to check volume groups on node %s." %
                      (node,))
      bad = True
    else:
      vgstatus = _HasValidVG(vglist, self.cfg.GetVGName())
      if vgstatus:
        feedback_fn("  - ERROR: %s on node %s" % (vgstatus, node))
        bad = True

    # checks config file checksum
    # checks ssh to any

    if 'filelist' not in node_result:
      bad = True
      feedback_fn("  - ERROR: node hasn't returned file checksum data")
    else:
      remote_cksum = node_result['filelist']
      for file_name in file_list:
        if file_name not in remote_cksum:
          bad = True
          feedback_fn("  - ERROR: file '%s' missing" % file_name)
        elif remote_cksum[file_name] != local_cksum[file_name]:
          bad = True
          feedback_fn("  - ERROR: file '%s' has wrong checksum" % file_name)

    if 'nodelist' not in node_result:
      bad = True
      feedback_fn("  - ERROR: node hasn't returned node connectivity data")
    else:
      if node_result['nodelist']:
        bad = True
        for node in node_result['nodelist']:
          feedback_fn("  - ERROR: communication with node '%s': %s" %
                          (node, node_result['nodelist'][node]))
    hyp_result = node_result.get('hypervisor', None)
    if hyp_result is not None:
      feedback_fn("  - ERROR: hypervisor verify failure: '%s'" % hyp_result)
    return bad

  def _VerifyInstance(self, instance, instanceconfig, node_vol_is,
                      node_instance, feedback_fn):
    """Verify an instance.

    This function checks to see if the required block devices are
    available on the instance's node.

    """
    bad = False

    node_current = instanceconfig.primary_node

    node_vol_should = {}
    instanceconfig.MapLVsByNode(node_vol_should)

    for node in node_vol_should:
      for volume in node_vol_should[node]:
        if node not in node_vol_is or volume not in node_vol_is[node]:
          feedback_fn("  - ERROR: volume %s missing on node %s" %
                          (volume, node))
          bad = True

    if not instanceconfig.status == 'down':
      if (node_current not in node_instance or
          not instance in node_instance[node_current]):
        feedback_fn("  - ERROR: instance %s not running on node %s" %
                        (instance, node_current))
        bad = True

    for node in node_instance:
      if (not node == node_current):
        if instance in node_instance[node]:
          feedback_fn("  - ERROR: instance %s should not run on node %s" %
                          (instance, node))
          bad = True

    return bad

  def _VerifyOrphanVolumes(self, node_vol_should, node_vol_is, feedback_fn):
    """Verify if there are any unknown volumes in the cluster.

    The .os, .swap and backup volumes are ignored. All other volumes are
    reported as unknown.

    """
    bad = False

    for node in node_vol_is:
      for volume in node_vol_is[node]:
        if node not in node_vol_should or volume not in node_vol_should[node]:
          feedback_fn("  - ERROR: volume %s on node %s should not exist" %
                      (volume, node))
          bad = True
    return bad

  def _VerifyOrphanInstances(self, instancelist, node_instance, feedback_fn):
    """Verify the list of running instances.

    This checks what instances are running but unknown to the cluster.

    """
    bad = False
    for node in node_instance:
      for runninginstance in node_instance[node]:
        if runninginstance not in instancelist:
          feedback_fn("  - ERROR: instance %s on node %s should not exist" %
                          (runninginstance, node))
          bad = True
    return bad

  def _VerifyNPlusOneMemory(self, node_info, instance_cfg, feedback_fn):
    """Verify N+1 Memory Resilience.

    Check that if one single node dies we can still start all the instances it
    was primary for.

    """
    bad = False

    for node, nodeinfo in node_info.iteritems():
      # This code checks that every node which is now listed as secondary has
      # enough memory to host all instances it is supposed to host, should a
      # single other node in the cluster fail.
      # FIXME: not ready for failover to an arbitrary node
      # FIXME: does not support file-backed instances
      # WARNING: we currently take into account down instances as well as up
      # ones, considering that even if they're down someone might want to start
      # them even in the event of a node failure.
      for prinode, instances in nodeinfo['sinst-by-pnode'].iteritems():
        needed_mem = 0
        for instance in instances:
          needed_mem += instance_cfg[instance].memory
        if nodeinfo['mfree'] < needed_mem:
          feedback_fn("  - ERROR: not enough memory on node %s to accommodate"
                      " failovers should node %s fail" % (node, prinode))
          bad = True
    return bad

  def CheckPrereq(self):
    """Check prerequisites.

    Transform the list of checks we're going to skip into a set and check that
    all its members are valid.

    """
    self.skip_set = frozenset(self.op.skip_checks)
    if not constants.VERIFY_OPTIONAL_CHECKS.issuperset(self.skip_set):
      raise errors.OpPrereqError("Invalid checks to be skipped specified")

  def Exec(self, feedback_fn):
    """Verify integrity of cluster, performing various tests on nodes.

    """
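    # Flow of the checks below: gather per-node data over RPC (volumes,
    # instances, volume groups, node verify, version, node info), verify
    # each node, then each instance, then orphan volumes and instances,
    # and finally (unless skipped) the N+1 memory check.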
    bad = False
    feedback_fn("* Verifying global settings")
    for msg in self.cfg.VerifyConfig():
      feedback_fn("  - ERROR: %s" % msg)

    vg_name = self.cfg.GetVGName()
    nodelist = utils.NiceSort(self.cfg.GetNodeList())
    instancelist = utils.NiceSort(self.cfg.GetInstanceList())
    i_non_redundant = [] # Non redundant instances
    node_volume = {}
    node_instance = {}
    node_info = {}
    instance_cfg = {}

    # FIXME: verify OS list
    # do local checksums
    file_names = list(self.sstore.GetFileList())
    file_names.append(constants.SSL_CERT_FILE)
    file_names.append(constants.CLUSTER_CONF_FILE)
    local_checksums = utils.FingerprintFiles(file_names)

    feedback_fn("* Gathering data (%d nodes)" % len(nodelist))
    all_volumeinfo = rpc.call_volume_list(nodelist, vg_name)
    all_instanceinfo = rpc.call_instance_list(nodelist)
    all_vglist = rpc.call_vg_list(nodelist)
    node_verify_param = {
      'filelist': file_names,
      'nodelist': nodelist,
      'hypervisor': None,
      }
    all_nvinfo = rpc.call_node_verify(nodelist, node_verify_param)
    all_rversion = rpc.call_version(nodelist)
    all_ninfo = rpc.call_node_info(nodelist, self.cfg.GetVGName())

    for node in nodelist:
      feedback_fn("* Verifying node %s" % node)
      result = self._VerifyNode(node, file_names, local_checksums,
                                all_vglist[node], all_nvinfo[node],
                                all_rversion[node], feedback_fn)
      bad = bad or result

      # node_volume
      volumeinfo = all_volumeinfo[node]

      if isinstance(volumeinfo, basestring):
        feedback_fn("  - ERROR: LVM problem on node %s: %s" %
                    (node, volumeinfo[-400:].encode('string_escape')))
        bad = True
        node_volume[node] = {}
      elif not isinstance(volumeinfo, dict):
        feedback_fn("  - ERROR: connection to %s failed" % (node,))
        bad = True
        continue
      else:
        node_volume[node] = volumeinfo

      # node_instance
      nodeinstance = all_instanceinfo[node]
      if type(nodeinstance) != list:
        feedback_fn("  - ERROR: connection to %s failed" % (node,))
        bad = True
        continue

      node_instance[node] = nodeinstance

      # node_info
      nodeinfo = all_ninfo[node]
      if not isinstance(nodeinfo, dict):
        feedback_fn("  - ERROR: connection to %s failed" % (node,))
        bad = True
        continue

      try:
        node_info[node] = {
          "mfree": int(nodeinfo['memory_free']),
          "dfree": int(nodeinfo['vg_free']),
          "pinst": [],
          "sinst": [],
          # dictionary holding all instances this node is secondary for,
          # grouped by their primary node. Each key is a cluster node, and each
          # value is a list of instances which have the key as primary and the
          # current node as secondary. This is handy to calculate N+1 memory
          # availability if you can only failover from a primary to its
          # secondary.
          "sinst-by-pnode": {},
        }
      except ValueError:
        feedback_fn("  - ERROR: invalid value returned from node %s" % (node,))
        bad = True
        continue

    node_vol_should = {}

    for instance in instancelist:
      feedback_fn("* Verifying instance %s" % instance)
      inst_config = self.cfg.GetInstanceInfo(instance)
      result =  self._VerifyInstance(instance, inst_config, node_volume,
                                     node_instance, feedback_fn)
      bad = bad or result

      inst_config.MapLVsByNode(node_vol_should)

      instance_cfg[instance] = inst_config

      pnode = inst_config.primary_node
      if pnode in node_info:
        node_info[pnode]['pinst'].append(instance)
      else:
        feedback_fn("  - ERROR: instance %s, connection to primary node"
                    " %s failed" % (instance, pnode))
        bad = True

      # If the instance is non-redundant we cannot survive losing its primary
      # node, so we are not N+1 compliant. On the other hand we have no disk
      # templates with more than one secondary so that situation is not well
      # supported either.
      # FIXME: does not support file-backed instances
      if len(inst_config.secondary_nodes) == 0:
        i_non_redundant.append(instance)
      elif len(inst_config.secondary_nodes) > 1:
        feedback_fn("  - WARNING: multiple secondaries for instance %s"
                    % instance)

      for snode in inst_config.secondary_nodes:
        if snode in node_info:
          node_info[snode]['sinst'].append(instance)
          if pnode not in node_info[snode]['sinst-by-pnode']:
            node_info[snode]['sinst-by-pnode'][pnode] = []
          node_info[snode]['sinst-by-pnode'][pnode].append(instance)
        else:
          feedback_fn("  - ERROR: instance %s, connection to secondary node"
                      " %s failed" % (instance, snode))

    feedback_fn("* Verifying orphan volumes")
    result = self._VerifyOrphanVolumes(node_vol_should, node_volume,
                                       feedback_fn)
    bad = bad or result

    feedback_fn("* Verifying remaining instances")
    result = self._VerifyOrphanInstances(instancelist, node_instance,
                                         feedback_fn)
    bad = bad or result

    if constants.VERIFY_NPLUSONE_MEM not in self.skip_set:
      feedback_fn("* Verifying N+1 Memory redundancy")
      result = self._VerifyNPlusOneMemory(node_info, instance_cfg, feedback_fn)
      bad = bad or result

    feedback_fn("* Other Notes")
    if i_non_redundant:
      feedback_fn("  - NOTICE: %d non-redundant instance(s) found."
                  % len(i_non_redundant))

    return int(bad)


class LUVerifyDisks(NoHooksLU):
  """Verifies the cluster disks status.

  """
  _OP_REQP = []

  def CheckPrereq(self):
    """Check prerequisites.

    This has no prerequisites.

    """
    pass

  def Exec(self, feedback_fn):
    """Verify integrity of cluster disks.

    """
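    # result is a 4-tuple: nodes that could not be queried, per-node LVM
    # error messages, instances with at least one offline LV, and missing
    # LVs as a map of instance name to (node, volume) pairs.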
    result = res_nodes, res_nlvm, res_instances, res_missing = [], {}, [], {}
967 2c95a8d4 Iustin Pop
968 2c95a8d4 Iustin Pop
    vg_name = self.cfg.GetVGName()
969 2c95a8d4 Iustin Pop
    nodes = utils.NiceSort(self.cfg.GetNodeList())
970 2c95a8d4 Iustin Pop
    instances = [self.cfg.GetInstanceInfo(name)
971 2c95a8d4 Iustin Pop
                 for name in self.cfg.GetInstanceList()]
972 2c95a8d4 Iustin Pop
973 2c95a8d4 Iustin Pop
    nv_dict = {}
974 2c95a8d4 Iustin Pop
    for inst in instances:
975 2c95a8d4 Iustin Pop
      inst_lvs = {}
976 2c95a8d4 Iustin Pop
      if (inst.status != "up" or
977 2c95a8d4 Iustin Pop
          inst.disk_template not in constants.DTS_NET_MIRROR):
978 2c95a8d4 Iustin Pop
        continue
979 2c95a8d4 Iustin Pop
      inst.MapLVsByNode(inst_lvs)
980 2c95a8d4 Iustin Pop
      # transform {iname: {node: [vol, ...]}} into {(node, vol): iname}
981 2c95a8d4 Iustin Pop
      for node, vol_list in inst_lvs.iteritems():
982 2c95a8d4 Iustin Pop
        for vol in vol_list:
983 2c95a8d4 Iustin Pop
          nv_dict[(node, vol)] = inst
984 2c95a8d4 Iustin Pop
985 2c95a8d4 Iustin Pop
    if not nv_dict:
986 2c95a8d4 Iustin Pop
      return result
987 2c95a8d4 Iustin Pop
988 2c95a8d4 Iustin Pop
    node_lvs = rpc.call_volume_list(nodes, vg_name)
989 2c95a8d4 Iustin Pop
990 2c95a8d4 Iustin Pop
    to_act = set()
991 2c95a8d4 Iustin Pop
    for node in nodes:
992 2c95a8d4 Iustin Pop
      # node_volume
993 2c95a8d4 Iustin Pop
      lvs = node_lvs[node]
994 2c95a8d4 Iustin Pop
995 b63ed789 Iustin Pop
      if isinstance(lvs, basestring):
996 b63ed789 Iustin Pop
        logger.Info("error enumerating LVs on node %s: %s" % (node, lvs))
997 b63ed789 Iustin Pop
        res_nlvm[node] = lvs
998 b63ed789 Iustin Pop
      elif not isinstance(lvs, dict):
999 2c95a8d4 Iustin Pop
        logger.Info("connection to node %s failed or invalid data returned" %
1000 2c95a8d4 Iustin Pop
                    (node,))
1001 2c95a8d4 Iustin Pop
        res_nodes.append(node)
1002 2c95a8d4 Iustin Pop
        continue
1003 2c95a8d4 Iustin Pop
1004 2c95a8d4 Iustin Pop
      for lv_name, (_, lv_inactive, lv_online) in lvs.iteritems():
1005 b63ed789 Iustin Pop
        inst = nv_dict.pop((node, lv_name), None)
1006 b63ed789 Iustin Pop
        if (not lv_online and inst is not None
1007 b63ed789 Iustin Pop
            and inst.name not in res_instances):
1008 b08d5a87 Iustin Pop
          res_instances.append(inst.name)
1009 2c95a8d4 Iustin Pop
1010 b63ed789 Iustin Pop
    # any leftover items in nv_dict are missing LVs, let's arrange the
1011 b63ed789 Iustin Pop
    # data better
1012 b63ed789 Iustin Pop
    for key, inst in nv_dict.iteritems():
1013 b63ed789 Iustin Pop
      if inst.name not in res_missing:
1014 b63ed789 Iustin Pop
        res_missing[inst.name] = []
1015 b63ed789 Iustin Pop
      res_missing[inst.name].append(key)
1016 b63ed789 Iustin Pop
1017 2c95a8d4 Iustin Pop
    return result
1018 2c95a8d4 Iustin Pop
1019 2c95a8d4 Iustin Pop
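# --- Illustrative sketch, not part of the original module ---
# LUVerifyDisks.Exec above builds nv_dict, a map of (node, lv_name) to the
# owning instance, then pops every LV actually reported by the nodes; any
# key left over is a missing volume.  The standalone helper below shows
# the same pop-and-collect pattern on plain dicts; the node and volume
# names in the usage comment are invented.
def _sketch_find_missing_lvs(expected, reported):
  """expected: {(node, lv): instance}; reported: {node: [lv, ...]}."""
  expected = dict(expected)  # work on a copy
  for node, lvs in reported.items():
    for lv in lvs:
      expected.pop((node, lv), None)
  missing = {}
  for (node, lv), instance in expected.items():
    missing.setdefault(instance, []).append((node, lv))
  return missing

# Example (assumed values):
#   expected = {('node1', 'xenvg/lv1'): 'inst1',
#               ('node2', 'xenvg/lv2'): 'inst1'}
#   reported = {'node1': ['xenvg/lv1'], 'node2': []}
#   _sketch_find_missing_lvs(expected, reported)
#   -> {'inst1': [('node2', 'xenvg/lv2')]}
# --- end of sketch ---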
1020 07bd8a51 Iustin Pop
class LURenameCluster(LogicalUnit):
1021 07bd8a51 Iustin Pop
  """Rename the cluster.
1022 07bd8a51 Iustin Pop

1023 07bd8a51 Iustin Pop
  """
1024 07bd8a51 Iustin Pop
  HPATH = "cluster-rename"
1025 07bd8a51 Iustin Pop
  HTYPE = constants.HTYPE_CLUSTER
1026 07bd8a51 Iustin Pop
  _OP_REQP = ["name"]
1027 07bd8a51 Iustin Pop
1028 07bd8a51 Iustin Pop
  def BuildHooksEnv(self):
1029 07bd8a51 Iustin Pop
    """Build hooks env.
1030 07bd8a51 Iustin Pop

1031 07bd8a51 Iustin Pop
    """
1032 07bd8a51 Iustin Pop
    env = {
1033 488b540d Iustin Pop
      "OP_TARGET": self.sstore.GetClusterName(),
1034 07bd8a51 Iustin Pop
      "NEW_NAME": self.op.name,
1035 07bd8a51 Iustin Pop
      }
1036 07bd8a51 Iustin Pop
    mn = self.sstore.GetMasterNode()
1037 07bd8a51 Iustin Pop
    return env, [mn], [mn]
1038 07bd8a51 Iustin Pop
1039 07bd8a51 Iustin Pop
  def CheckPrereq(self):
1040 07bd8a51 Iustin Pop
    """Verify that the passed name is a valid one.
1041 07bd8a51 Iustin Pop

1042 07bd8a51 Iustin Pop
    """
1043 89e1fc26 Iustin Pop
    hostname = utils.HostInfo(self.op.name)
1044 07bd8a51 Iustin Pop
1045 bcf043c9 Iustin Pop
    new_name = hostname.name
1046 bcf043c9 Iustin Pop
    self.ip = new_ip = hostname.ip
1047 07bd8a51 Iustin Pop
    old_name = self.sstore.GetClusterName()
1048 07bd8a51 Iustin Pop
    old_ip = self.sstore.GetMasterIP()
1049 07bd8a51 Iustin Pop
    if new_name == old_name and new_ip == old_ip:
1050 07bd8a51 Iustin Pop
      raise errors.OpPrereqError("Neither the name nor the IP address of the"
1051 07bd8a51 Iustin Pop
                                 " cluster has changed")
1052 07bd8a51 Iustin Pop
    if new_ip != old_ip:
1053 07bd8a51 Iustin Pop
      result = utils.RunCmd(["fping", "-q", new_ip])
1054 07bd8a51 Iustin Pop
      if not result.failed:
1055 07bd8a51 Iustin Pop
        raise errors.OpPrereqError("The given cluster IP address (%s) is"
1056 07bd8a51 Iustin Pop
                                   " reachable on the network. Aborting." %
1057 07bd8a51 Iustin Pop
                                   new_ip)
1058 07bd8a51 Iustin Pop
1059 07bd8a51 Iustin Pop
    self.op.name = new_name
1060 07bd8a51 Iustin Pop
1061 07bd8a51 Iustin Pop
  def Exec(self, feedback_fn):
1062 07bd8a51 Iustin Pop
    """Rename the cluster.
1063 07bd8a51 Iustin Pop

1064 07bd8a51 Iustin Pop
    """
1065 07bd8a51 Iustin Pop
    clustername = self.op.name
1066 07bd8a51 Iustin Pop
    ip = self.ip
1067 07bd8a51 Iustin Pop
    ss = self.sstore
1068 07bd8a51 Iustin Pop
1069 07bd8a51 Iustin Pop
    # shutdown the master IP
1070 07bd8a51 Iustin Pop
    master = ss.GetMasterNode()
1071 07bd8a51 Iustin Pop
    if not rpc.call_node_stop_master(master):
1072 07bd8a51 Iustin Pop
      raise errors.OpExecError("Could not disable the master role")
1073 07bd8a51 Iustin Pop
1074 07bd8a51 Iustin Pop
    try:
1075 07bd8a51 Iustin Pop
      # modify the sstore
1076 07bd8a51 Iustin Pop
      ss.SetKey(ss.SS_MASTER_IP, ip)
1077 07bd8a51 Iustin Pop
      ss.SetKey(ss.SS_CLUSTER_NAME, clustername)
1078 07bd8a51 Iustin Pop
1079 07bd8a51 Iustin Pop
      # Distribute updated ss config to all nodes
1080 07bd8a51 Iustin Pop
      myself = self.cfg.GetNodeInfo(master)
1081 07bd8a51 Iustin Pop
      dist_nodes = self.cfg.GetNodeList()
1082 07bd8a51 Iustin Pop
      if myself.name in dist_nodes:
1083 07bd8a51 Iustin Pop
        dist_nodes.remove(myself.name)
1084 07bd8a51 Iustin Pop
1085 07bd8a51 Iustin Pop
      logger.Debug("Copying updated ssconf data to all nodes")
1086 07bd8a51 Iustin Pop
      for keyname in [ss.SS_CLUSTER_NAME, ss.SS_MASTER_IP]:
1087 07bd8a51 Iustin Pop
        fname = ss.KeyToFilename(keyname)
1088 07bd8a51 Iustin Pop
        result = rpc.call_upload_file(dist_nodes, fname)
1089 07bd8a51 Iustin Pop
        for to_node in dist_nodes:
1090 07bd8a51 Iustin Pop
          if not result[to_node]:
1091 07bd8a51 Iustin Pop
            logger.Error("copy of file %s to node %s failed" %
1092 07bd8a51 Iustin Pop
                         (fname, to_node))
1093 07bd8a51 Iustin Pop
    finally:
1094 07bd8a51 Iustin Pop
      if not rpc.call_node_start_master(master):
1095 f4bc1f2c Michael Hanselmann
        logger.Error("Could not re-enable the master role on the master,"
1096 f4bc1f2c Michael Hanselmann
                     " please restart manually.")
1097 07bd8a51 Iustin Pop
1098 07bd8a51 Iustin Pop
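# --- Illustrative sketch, not part of the original module ---
# LURenameCluster.Exec above follows a stop/update/redistribute/restart
# pattern: the master IP is taken down, the ssconf keys are rewritten,
# the changed files are pushed to every other node, and the master role
# is restarted even if the push partially failed.  The helper below
# captures just the "push to everyone but myself and report failures"
# step; the push_fn callable stands in for rpc.call_upload_file and is an
# assumption of this sketch.
def _sketch_distribute(filenames, nodes, myself, push_fn):
  """Push each file to all nodes except myself; return failed pairs."""
  targets = [node for node in nodes if node != myself]
  failures = []
  for fname in filenames:
    result = push_fn(targets, fname)  # expected: {node: success_boolean}
    for node in targets:
      if not result.get(node):
        failures.append((fname, node))
  return failures

# Example (assumed values): a push_fn that always fails for node3.
#   push = lambda nodes, fname: dict((n, n != "node3") for n in nodes)
#   _sketch_distribute(["ssconf_cluster_name"], ["node1", "node3"],
#                      "node1", push)
#   -> [("ssconf_cluster_name", "node3")]
# --- end of sketch ---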
1099 8084f9f6 Manuel Franceschini
def _RecursiveCheckIfLVMBased(disk):
1100 8084f9f6 Manuel Franceschini
  """Check if the given disk or its children are lvm-based.
1101 8084f9f6 Manuel Franceschini

1102 8084f9f6 Manuel Franceschini
  Args:
1103 8084f9f6 Manuel Franceschini
    disk: ganeti.objects.Disk object
1104 8084f9f6 Manuel Franceschini

1105 8084f9f6 Manuel Franceschini
  Returns:
1106 8084f9f6 Manuel Franceschini
    boolean indicating whether a LD_LV dev_type was found or not
1107 8084f9f6 Manuel Franceschini

1108 8084f9f6 Manuel Franceschini
  """
1109 8084f9f6 Manuel Franceschini
  if disk.children:
1110 8084f9f6 Manuel Franceschini
    for chdisk in disk.children:
1111 8084f9f6 Manuel Franceschini
      if _RecursiveCheckIfLVMBased(chdisk):
1112 8084f9f6 Manuel Franceschini
        return True
1113 8084f9f6 Manuel Franceschini
  return disk.dev_type == constants.LD_LV
1114 8084f9f6 Manuel Franceschini
1115 8084f9f6 Manuel Franceschini
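# --- Illustrative sketch, not part of the original module ---
# _RecursiveCheckIfLVMBased above walks a Disk tree depth-first and
# reports whether any device in it is an LVM logical volume.  The
# self-contained example below uses a tiny stand-in class instead of
# ganeti.objects.Disk and a made-up "lvm"/"drbd" dev_type vocabulary,
# purely to show the shape of the recursion.
class _SketchDisk(object):
  def __init__(self, dev_type, children=None):
    self.dev_type = dev_type
    self.children = children or []

def _sketch_has_lvm(disk):
  """Return True if the disk or any of its children is LVM-based."""
  for child in disk.children:
    if _sketch_has_lvm(child):
      return True
  return disk.dev_type == "lvm"

# Example: a DRBD device backed by two LVM volumes is LVM-based.
#   drbd = _SketchDisk("drbd", [_SketchDisk("lvm"), _SketchDisk("lvm")])
#   _sketch_has_lvm(drbd) -> True
# --- end of sketch ---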
1116 8084f9f6 Manuel Franceschini
class LUSetClusterParams(LogicalUnit):
1117 8084f9f6 Manuel Franceschini
  """Change the parameters of the cluster.
1118 8084f9f6 Manuel Franceschini

1119 8084f9f6 Manuel Franceschini
  """
1120 8084f9f6 Manuel Franceschini
  HPATH = "cluster-modify"
1121 8084f9f6 Manuel Franceschini
  HTYPE = constants.HTYPE_CLUSTER
1122 8084f9f6 Manuel Franceschini
  _OP_REQP = []
1123 8084f9f6 Manuel Franceschini
1124 8084f9f6 Manuel Franceschini
  def BuildHooksEnv(self):
1125 8084f9f6 Manuel Franceschini
    """Build hooks env.
1126 8084f9f6 Manuel Franceschini

1127 8084f9f6 Manuel Franceschini
    """
1128 8084f9f6 Manuel Franceschini
    env = {
1129 8084f9f6 Manuel Franceschini
      "OP_TARGET": self.sstore.GetClusterName(),
1130 8084f9f6 Manuel Franceschini
      "NEW_VG_NAME": self.op.vg_name,
1131 8084f9f6 Manuel Franceschini
      }
1132 8084f9f6 Manuel Franceschini
    mn = self.sstore.GetMasterNode()
1133 8084f9f6 Manuel Franceschini
    return env, [mn], [mn]
1134 8084f9f6 Manuel Franceschini
1135 8084f9f6 Manuel Franceschini
  def CheckPrereq(self):
1136 8084f9f6 Manuel Franceschini
    """Check prerequisites.
1137 8084f9f6 Manuel Franceschini

1138 8084f9f6 Manuel Franceschini
    This checks that the given parameters don't conflict and that
1139 5f83e263 Iustin Pop
    the given volume group is valid.
1140 8084f9f6 Manuel Franceschini

1141 8084f9f6 Manuel Franceschini
    """
1142 8084f9f6 Manuel Franceschini
    if not self.op.vg_name:
1143 8084f9f6 Manuel Franceschini
      instances = [self.cfg.GetInstanceInfo(name)
1144 8084f9f6 Manuel Franceschini
                   for name in self.cfg.GetInstanceList()]
1145 8084f9f6 Manuel Franceschini
      for inst in instances:
1146 8084f9f6 Manuel Franceschini
        for disk in inst.disks:
1147 8084f9f6 Manuel Franceschini
          if _RecursiveCheckIfLVMBased(disk):
1148 8084f9f6 Manuel Franceschini
            raise errors.OpPrereqError("Cannot disable lvm storage while"
1149 8084f9f6 Manuel Franceschini
                                       " lvm-based instances exist")
1150 8084f9f6 Manuel Franceschini
1151 8084f9f6 Manuel Franceschini
    # if vg_name not None, checks given volume group on all nodes
1152 8084f9f6 Manuel Franceschini
    if self.op.vg_name:
1153 8084f9f6 Manuel Franceschini
      node_list = self.cfg.GetNodeList()
1154 8084f9f6 Manuel Franceschini
      vglist = rpc.call_vg_list(node_list)
1155 8084f9f6 Manuel Franceschini
      for node in node_list:
1156 8084f9f6 Manuel Franceschini
        vgstatus = _HasValidVG(vglist[node], self.op.vg_name)
1157 8084f9f6 Manuel Franceschini
        if vgstatus:
1158 8084f9f6 Manuel Franceschini
          raise errors.OpPrereqError("Error on node '%s': %s" %
1159 8084f9f6 Manuel Franceschini
                                     (node, vgstatus))
1160 8084f9f6 Manuel Franceschini
1161 8084f9f6 Manuel Franceschini
  def Exec(self, feedback_fn):
1162 8084f9f6 Manuel Franceschini
    """Change the parameters of the cluster.
1163 8084f9f6 Manuel Franceschini

1164 8084f9f6 Manuel Franceschini
    """
1165 8084f9f6 Manuel Franceschini
    if self.op.vg_name != self.cfg.GetVGName():
1166 8084f9f6 Manuel Franceschini
      self.cfg.SetVGName(self.op.vg_name)
1167 8084f9f6 Manuel Franceschini
    else:
1168 8084f9f6 Manuel Franceschini
      feedback_fn("Cluster LVM configuration already in desired"
1169 8084f9f6 Manuel Franceschini
                  " state, not changing")
1170 8084f9f6 Manuel Franceschini
1171 8084f9f6 Manuel Franceschini
1172 5bfac263 Iustin Pop
def _WaitForSync(cfgw, instance, proc, oneshot=False, unlock=False):
1173 a8083063 Iustin Pop
  """Sleep and poll for an instance's disk to sync.
1174 a8083063 Iustin Pop

1175 a8083063 Iustin Pop
  """
1176 a8083063 Iustin Pop
  if not instance.disks:
1177 a8083063 Iustin Pop
    return True
1178 a8083063 Iustin Pop
1179 a8083063 Iustin Pop
  if not oneshot:
1180 5bfac263 Iustin Pop
    proc.LogInfo("Waiting for instance %s to sync disks." % instance.name)
1181 a8083063 Iustin Pop
1182 a8083063 Iustin Pop
  node = instance.primary_node
1183 a8083063 Iustin Pop
1184 a8083063 Iustin Pop
  for dev in instance.disks:
1185 a8083063 Iustin Pop
    cfgw.SetDiskID(dev, node)
1186 a8083063 Iustin Pop
1187 a8083063 Iustin Pop
  retries = 0
1188 a8083063 Iustin Pop
  while True:
1189 a8083063 Iustin Pop
    max_time = 0
1190 a8083063 Iustin Pop
    done = True
1191 a8083063 Iustin Pop
    cumul_degraded = False
1192 a8083063 Iustin Pop
    rstats = rpc.call_blockdev_getmirrorstatus(node, instance.disks)
1193 a8083063 Iustin Pop
    if not rstats:
1194 5bfac263 Iustin Pop
      proc.LogWarning("Can't get any data from node %s" % node)
1195 a8083063 Iustin Pop
      retries += 1
1196 a8083063 Iustin Pop
      if retries >= 10:
1197 3ecf6786 Iustin Pop
        raise errors.RemoteError("Can't contact node %s for mirror data,"
1198 3ecf6786 Iustin Pop
                                 " aborting." % node)
1199 a8083063 Iustin Pop
      time.sleep(6)
1200 a8083063 Iustin Pop
      continue
1201 a8083063 Iustin Pop
    retries = 0
1202 a8083063 Iustin Pop
    for i in range(len(rstats)):
1203 a8083063 Iustin Pop
      mstat = rstats[i]
1204 a8083063 Iustin Pop
      if mstat is None:
1205 5bfac263 Iustin Pop
        proc.LogWarning("Can't compute data for node %s/%s" %
1206 a8083063 Iustin Pop
                        (node, instance.disks[i].iv_name))
1207 a8083063 Iustin Pop
        continue
1208 0834c866 Iustin Pop
      # we ignore the ldisk parameter
1209 0834c866 Iustin Pop
      perc_done, est_time, is_degraded, _ = mstat
1210 a8083063 Iustin Pop
      cumul_degraded = cumul_degraded or (is_degraded and perc_done is None)
1211 a8083063 Iustin Pop
      if perc_done is not None:
1212 a8083063 Iustin Pop
        done = False
1213 a8083063 Iustin Pop
        if est_time is not None:
1214 a8083063 Iustin Pop
          rem_time = "%d estimated seconds remaining" % est_time
1215 a8083063 Iustin Pop
          max_time = est_time
1216 a8083063 Iustin Pop
        else:
1217 a8083063 Iustin Pop
          rem_time = "no time estimate"
1218 5bfac263 Iustin Pop
        proc.LogInfo("- device %s: %5.2f%% done, %s" %
1219 5bfac263 Iustin Pop
                     (instance.disks[i].iv_name, perc_done, rem_time))
1220 a8083063 Iustin Pop
    if done or oneshot:
1221 a8083063 Iustin Pop
      break
1222 a8083063 Iustin Pop
1223 a8083063 Iustin Pop
    if unlock:
1224 685ee993 Iustin Pop
      #utils.Unlock('cmd')
1225 685ee993 Iustin Pop
      pass
1226 a8083063 Iustin Pop
    try:
1227 a8083063 Iustin Pop
      time.sleep(min(60, max_time))
1228 a8083063 Iustin Pop
    finally:
1229 a8083063 Iustin Pop
      if unlock:
1230 685ee993 Iustin Pop
        #utils.Lock('cmd')
1231 685ee993 Iustin Pop
        pass
1232 a8083063 Iustin Pop
1233 a8083063 Iustin Pop
  if done:
1234 5bfac263 Iustin Pop
    proc.LogInfo("Instance %s's disks are in sync." % instance.name)
1235 a8083063 Iustin Pop
  return not cumul_degraded
1236 a8083063 Iustin Pop
1237 a8083063 Iustin Pop
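# --- Illustrative sketch, not part of the original module ---
# _WaitForSync above polls blockdev_getmirrorstatus and reads each result
# as a (perc_done, est_time, is_degraded, ldisk) tuple: a None perc_done
# means the device is fully synced, otherwise the loop keeps waiting and
# reports progress.  The helper below condenses that interpretation for a
# single poll; the sample tuples in the usage comment are invented.
def _sketch_sync_progress(mirror_stats):
  """Return (all_done, degraded, worst_est_time) for one poll result."""
  all_done = True
  degraded = False
  worst_est_time = 0
  for perc_done, est_time, is_degraded, _ in mirror_stats:
    degraded = degraded or (is_degraded and perc_done is None)
    if perc_done is not None:
      all_done = False
      if est_time is not None:
        worst_est_time = max(worst_est_time, est_time)
  return all_done, degraded, worst_est_time

# Example (assumed values): one disk at 42.5% with 120s left, one synced.
#   stats = [(42.5, 120, True, False), (None, None, False, False)]
#   _sketch_sync_progress(stats) -> (False, False, 120)
# --- end of sketch ---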
1238 0834c866 Iustin Pop
def _CheckDiskConsistency(cfgw, dev, node, on_primary, ldisk=False):
1239 a8083063 Iustin Pop
  """Check that mirrors are not degraded.
1240 a8083063 Iustin Pop

1241 0834c866 Iustin Pop
  The ldisk parameter, if True, will change the test from the
1242 0834c866 Iustin Pop
  is_degraded attribute (which represents overall non-ok status for
1243 0834c866 Iustin Pop
  the device(s)) to the ldisk (representing the local storage status).
1244 0834c866 Iustin Pop

1245 a8083063 Iustin Pop
  """
1246 a8083063 Iustin Pop
  cfgw.SetDiskID(dev, node)
1247 0834c866 Iustin Pop
  if ldisk:
1248 0834c866 Iustin Pop
    idx = 6
1249 0834c866 Iustin Pop
  else:
1250 0834c866 Iustin Pop
    idx = 5
1251 a8083063 Iustin Pop
1252 a8083063 Iustin Pop
  result = True
1253 a8083063 Iustin Pop
  if on_primary or dev.AssembleOnSecondary():
1254 a8083063 Iustin Pop
    rstats = rpc.call_blockdev_find(node, dev)
1255 a8083063 Iustin Pop
    if not rstats:
1256 aa9d0c32 Guido Trotter
      logger.ToStderr("Node %s: Disk degraded, not found or node down" % node)
1257 a8083063 Iustin Pop
      result = False
1258 a8083063 Iustin Pop
    else:
1259 0834c866 Iustin Pop
      result = result and (not rstats[idx])
1260 a8083063 Iustin Pop
  if dev.children:
1261 a8083063 Iustin Pop
    for child in dev.children:
1262 a8083063 Iustin Pop
      result = result and _CheckDiskConsistency(cfgw, child, node, on_primary,
                                                 ldisk=ldisk)
1263 a8083063 Iustin Pop
1264 a8083063 Iustin Pop
  return result
1265 a8083063 Iustin Pop
1266 a8083063 Iustin Pop
1267 a8083063 Iustin Pop
class LUDiagnoseOS(NoHooksLU):
1268 a8083063 Iustin Pop
  """Logical unit for OS diagnose/query.
1269 a8083063 Iustin Pop

1270 a8083063 Iustin Pop
  """
1271 1f9430d6 Iustin Pop
  _OP_REQP = ["output_fields", "names"]
1272 a8083063 Iustin Pop
1273 a8083063 Iustin Pop
  def CheckPrereq(self):
1274 a8083063 Iustin Pop
    """Check prerequisites.
1275 a8083063 Iustin Pop

1276 a8083063 Iustin Pop
    This always succeeds, since this is a pure query LU.
1277 a8083063 Iustin Pop

1278 a8083063 Iustin Pop
    """
1279 1f9430d6 Iustin Pop
    if self.op.names:
1280 1f9430d6 Iustin Pop
      raise errors.OpPrereqError("Selective OS query not supported")
1281 1f9430d6 Iustin Pop
1282 1f9430d6 Iustin Pop
    self.dynamic_fields = frozenset(["name", "valid", "node_status"])
1283 1f9430d6 Iustin Pop
    _CheckOutputFields(static=[],
1284 1f9430d6 Iustin Pop
                       dynamic=self.dynamic_fields,
1285 1f9430d6 Iustin Pop
                       selected=self.op.output_fields)
1286 1f9430d6 Iustin Pop
1287 1f9430d6 Iustin Pop
  @staticmethod
1288 1f9430d6 Iustin Pop
  def _DiagnoseByOS(node_list, rlist):
1289 1f9430d6 Iustin Pop
    """Remaps a per-node return list into an a per-os per-node dictionary
1290 1f9430d6 Iustin Pop

1291 1f9430d6 Iustin Pop
      Args:
1292 1f9430d6 Iustin Pop
        node_list: a list with the names of all nodes
1293 1f9430d6 Iustin Pop
        rlist: a map with node names as keys and OS objects as values
1294 1f9430d6 Iustin Pop

1295 1f9430d6 Iustin Pop
      Returns:
1296 1f9430d6 Iustin Pop
        map: a map with osnames as keys and as value another map, with
1297 1f9430d6 Iustin Pop
             nodes as
1298 1f9430d6 Iustin Pop
             keys and list of OS objects as values
1299 1f9430d6 Iustin Pop
             e.g. {"debian-etch": {"node1": [<object>,...],
1300 1f9430d6 Iustin Pop
                                   "node2": [<object>,]}
1301 1f9430d6 Iustin Pop
                  }
1302 1f9430d6 Iustin Pop

1303 1f9430d6 Iustin Pop
    """
1304 1f9430d6 Iustin Pop
    all_os = {}
1305 1f9430d6 Iustin Pop
    for node_name, nr in rlist.iteritems():
1306 1f9430d6 Iustin Pop
      if not nr:
1307 1f9430d6 Iustin Pop
        continue
1308 b4de68a9 Iustin Pop
      for os_obj in nr:
1309 b4de68a9 Iustin Pop
        if os_obj.name not in all_os:
1310 1f9430d6 Iustin Pop
          # build a list of nodes for this os containing empty lists
1311 1f9430d6 Iustin Pop
          # for each node in node_list
1312 b4de68a9 Iustin Pop
          all_os[os_obj.name] = {}
1313 1f9430d6 Iustin Pop
          for nname in node_list:
1314 b4de68a9 Iustin Pop
            all_os[os_obj.name][nname] = []
1315 b4de68a9 Iustin Pop
        all_os[os_obj.name][node_name].append(os_obj)
1316 1f9430d6 Iustin Pop
    return all_os
1317 a8083063 Iustin Pop
1318 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
1319 a8083063 Iustin Pop
    """Compute the list of OSes.
1320 a8083063 Iustin Pop

1321 a8083063 Iustin Pop
    """
1322 a8083063 Iustin Pop
    node_list = self.cfg.GetNodeList()
1323 a8083063 Iustin Pop
    node_data = rpc.call_os_diagnose(node_list)
1324 a8083063 Iustin Pop
    if node_data == False:
1325 3ecf6786 Iustin Pop
      raise errors.OpExecError("Can't gather the list of OSes")
1326 1f9430d6 Iustin Pop
    pol = self._DiagnoseByOS(node_list, node_data)
1327 1f9430d6 Iustin Pop
    output = []
1328 1f9430d6 Iustin Pop
    for os_name, os_data in pol.iteritems():
1329 1f9430d6 Iustin Pop
      row = []
1330 1f9430d6 Iustin Pop
      for field in self.op.output_fields:
1331 1f9430d6 Iustin Pop
        if field == "name":
1332 1f9430d6 Iustin Pop
          val = os_name
1333 1f9430d6 Iustin Pop
        elif field == "valid":
1334 1f9430d6 Iustin Pop
          val = utils.all([osl and osl[0] for osl in os_data.values()])
1335 1f9430d6 Iustin Pop
        elif field == "node_status":
1336 1f9430d6 Iustin Pop
          val = {}
1337 1f9430d6 Iustin Pop
          for node_name, nos_list in os_data.iteritems():
1338 1f9430d6 Iustin Pop
            val[node_name] = [(v.status, v.path) for v in nos_list]
1339 1f9430d6 Iustin Pop
        else:
1340 1f9430d6 Iustin Pop
          raise errors.ParameterError(field)
1341 1f9430d6 Iustin Pop
        row.append(val)
1342 1f9430d6 Iustin Pop
      output.append(row)
1343 1f9430d6 Iustin Pop
1344 1f9430d6 Iustin Pop
    return output
1345 a8083063 Iustin Pop
1346 a8083063 Iustin Pop
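# --- Illustrative sketch, not part of the original module ---
# LUDiagnoseOS._DiagnoseByOS above turns the per-node RPC result
# {node: [OS, ...]} inside-out into {os_name: {node: [OS, ...]}}, seeding
# every known node with an empty list so that an OS missing from a node
# shows up as an empty entry.  The standalone version below does the same
# with simple (name, path, status) tuples standing in for the OS objects
# returned by the backend.
import collections

_SketchOS = collections.namedtuple("_SketchOS", ["name", "path", "status"])

def _sketch_remap_by_os(node_list, rlist):
  """Remap {node: [os, ...]} into {os_name: {node: [os, ...]}}."""
  all_os = {}
  for node_name, oses in rlist.items():
    for os_obj in oses or []:
      if os_obj.name not in all_os:
        all_os[os_obj.name] = dict((n, []) for n in node_list)
      all_os[os_obj.name][node_name].append(os_obj)
  return all_os

# Example (assumed values): "debian-etch" exists only on node1, so the
# remapped result still lists node2 with an empty list for that OS.
#   rlist = {"node1": [_SketchOS("debian-etch", "/srv/os", True)],
#            "node2": []}
#   _sketch_remap_by_os(["node1", "node2"], rlist)
# --- end of sketch ---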
1347 a8083063 Iustin Pop
class LURemoveNode(LogicalUnit):
1348 a8083063 Iustin Pop
  """Logical unit for removing a node.
1349 a8083063 Iustin Pop

1350 a8083063 Iustin Pop
  """
1351 a8083063 Iustin Pop
  HPATH = "node-remove"
1352 a8083063 Iustin Pop
  HTYPE = constants.HTYPE_NODE
1353 a8083063 Iustin Pop
  _OP_REQP = ["node_name"]
1354 a8083063 Iustin Pop
1355 a8083063 Iustin Pop
  def BuildHooksEnv(self):
1356 a8083063 Iustin Pop
    """Build hooks env.
1357 a8083063 Iustin Pop

1358 a8083063 Iustin Pop
    This doesn't run on the target node in the pre phase as a failed
1359 a8083063 Iustin Pop
    node would not allow itself to run.
1360 a8083063 Iustin Pop

1361 a8083063 Iustin Pop
    """
1362 396e1b78 Michael Hanselmann
    env = {
1363 0e137c28 Iustin Pop
      "OP_TARGET": self.op.node_name,
1364 396e1b78 Michael Hanselmann
      "NODE_NAME": self.op.node_name,
1365 396e1b78 Michael Hanselmann
      }
1366 a8083063 Iustin Pop
    all_nodes = self.cfg.GetNodeList()
1367 a8083063 Iustin Pop
    all_nodes.remove(self.op.node_name)
1368 396e1b78 Michael Hanselmann
    return env, all_nodes, all_nodes
1369 a8083063 Iustin Pop
1370 a8083063 Iustin Pop
  def CheckPrereq(self):
1371 a8083063 Iustin Pop
    """Check prerequisites.
1372 a8083063 Iustin Pop

1373 a8083063 Iustin Pop
    This checks:
1374 a8083063 Iustin Pop
     - the node exists in the configuration
1375 a8083063 Iustin Pop
     - it does not have primary or secondary instances
1376 a8083063 Iustin Pop
     - it's not the master
1377 a8083063 Iustin Pop

1378 a8083063 Iustin Pop
    Any errors are signalled by raising errors.OpPrereqError.
1379 a8083063 Iustin Pop

1380 a8083063 Iustin Pop
    """
1381 a8083063 Iustin Pop
    node = self.cfg.GetNodeInfo(self.cfg.ExpandNodeName(self.op.node_name))
1382 a8083063 Iustin Pop
    if node is None:
1383 a02bc76e Iustin Pop
      raise errors.OpPrereqError("Node '%s' is unknown." % self.op.node_name)
1384 a8083063 Iustin Pop
1385 a8083063 Iustin Pop
    instance_list = self.cfg.GetInstanceList()
1386 a8083063 Iustin Pop
1387 880478f8 Iustin Pop
    masternode = self.sstore.GetMasterNode()
1388 a8083063 Iustin Pop
    if node.name == masternode:
1389 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("Node is the master node,"
1390 3ecf6786 Iustin Pop
                                 " you need to failover first.")
1391 a8083063 Iustin Pop
1392 a8083063 Iustin Pop
    for instance_name in instance_list:
1393 a8083063 Iustin Pop
      instance = self.cfg.GetInstanceInfo(instance_name)
1394 a8083063 Iustin Pop
      if node.name == instance.primary_node:
1395 3ecf6786 Iustin Pop
        raise errors.OpPrereqError("Instance %s still running on the node,"
1396 3ecf6786 Iustin Pop
                                   " please remove first." % instance_name)
1397 a8083063 Iustin Pop
      if node.name in instance.secondary_nodes:
1398 3ecf6786 Iustin Pop
        raise errors.OpPrereqError("Instance %s has node as a secondary,"
1399 3ecf6786 Iustin Pop
                                   " please remove first." % instance_name)
1400 a8083063 Iustin Pop
    self.op.node_name = node.name
1401 a8083063 Iustin Pop
    self.node = node
1402 a8083063 Iustin Pop
1403 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
1404 a8083063 Iustin Pop
    """Removes the node from the cluster.
1405 a8083063 Iustin Pop

1406 a8083063 Iustin Pop
    """
1407 a8083063 Iustin Pop
    node = self.node
1408 a8083063 Iustin Pop
    logger.Info("stopping the node daemon and removing configs from node %s" %
1409 a8083063 Iustin Pop
                node.name)
1410 a8083063 Iustin Pop
1411 a8083063 Iustin Pop
    rpc.call_node_leave_cluster(node.name)
1412 a8083063 Iustin Pop
1413 c92b310a Michael Hanselmann
    self.ssh.Run(node.name, 'root', "%s stop" % constants.NODE_INITD_SCRIPT)
1414 a8083063 Iustin Pop
1415 a8083063 Iustin Pop
    logger.Info("Removing node %s from config" % node.name)
1416 a8083063 Iustin Pop
1417 a8083063 Iustin Pop
    self.cfg.RemoveNode(node.name)
1418 a8083063 Iustin Pop
1419 c8a0948f Michael Hanselmann
    _RemoveHostFromEtcHosts(node.name)
1420 c8a0948f Michael Hanselmann
1421 a8083063 Iustin Pop
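# --- Illustrative sketch, not part of the original module ---
# LURemoveNode.CheckPrereq above refuses the removal when the node is the
# master or when any instance still uses it as primary or secondary.  The
# standalone check below expresses the same rules on plain data; the
# instance tuples and the short error strings are simplifications made
# for this sketch, not the module's real messages.
def _sketch_can_remove_node(node, master, instances):
  """instances: iterable of (name, primary_node, [secondary_node, ...])."""
  if node == master:
    return False, "node is the master, failover first"
  for iname, pnode, snodes in instances:
    if node == pnode or node in snodes:
      return False, "instance %s still uses the node" % iname
  return True, None

# Example (assumed values):
#   _sketch_can_remove_node("node2", "node1",
#                           [("inst1", "node1", ["node2"])])
#   -> (False, "instance inst1 still uses the node")
# --- end of sketch ---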
1422 a8083063 Iustin Pop
class LUQueryNodes(NoHooksLU):
1423 a8083063 Iustin Pop
  """Logical unit for querying nodes.
1424 a8083063 Iustin Pop

1425 a8083063 Iustin Pop
  """
1426 246e180a Iustin Pop
  _OP_REQP = ["output_fields", "names"]
1427 a8083063 Iustin Pop
1428 a8083063 Iustin Pop
  def CheckPrereq(self):
1429 a8083063 Iustin Pop
    """Check prerequisites.
1430 a8083063 Iustin Pop

1431 a8083063 Iustin Pop
    This checks that the fields required are valid output fields.
1432 a8083063 Iustin Pop

1433 a8083063 Iustin Pop
    """
1434 a8083063 Iustin Pop
    self.dynamic_fields = frozenset(["dtotal", "dfree",
1435 3ef10550 Michael Hanselmann
                                     "mtotal", "mnode", "mfree",
1436 3ef10550 Michael Hanselmann
                                     "bootid"])
1437 a8083063 Iustin Pop
1438 ec223efb Iustin Pop
    _CheckOutputFields(static=["name", "pinst_cnt", "sinst_cnt",
1439 ec223efb Iustin Pop
                               "pinst_list", "sinst_list",
1440 ec223efb Iustin Pop
                               "pip", "sip"],
1441 dcb93971 Michael Hanselmann
                       dynamic=self.dynamic_fields,
1442 dcb93971 Michael Hanselmann
                       selected=self.op.output_fields)
1443 a8083063 Iustin Pop
1444 246e180a Iustin Pop
    self.wanted = _GetWantedNodes(self, self.op.names)
1445 a8083063 Iustin Pop
1446 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
1447 a8083063 Iustin Pop
    """Computes the list of nodes and their attributes.
1448 a8083063 Iustin Pop

1449 a8083063 Iustin Pop
    """
1450 246e180a Iustin Pop
    nodenames = self.wanted
1451 a8083063 Iustin Pop
    nodelist = [self.cfg.GetNodeInfo(name) for name in nodenames]
1452 a8083063 Iustin Pop
1453 a8083063 Iustin Pop
    # begin data gathering
1454 a8083063 Iustin Pop
1455 a8083063 Iustin Pop
    if self.dynamic_fields.intersection(self.op.output_fields):
1456 a8083063 Iustin Pop
      live_data = {}
1457 a8083063 Iustin Pop
      node_data = rpc.call_node_info(nodenames, self.cfg.GetVGName())
1458 a8083063 Iustin Pop
      for name in nodenames:
1459 a8083063 Iustin Pop
        nodeinfo = node_data.get(name, None)
1460 a8083063 Iustin Pop
        if nodeinfo:
1461 a8083063 Iustin Pop
          live_data[name] = {
1462 a8083063 Iustin Pop
            "mtotal": utils.TryConvert(int, nodeinfo['memory_total']),
1463 a8083063 Iustin Pop
            "mnode": utils.TryConvert(int, nodeinfo['memory_dom0']),
1464 a8083063 Iustin Pop
            "mfree": utils.TryConvert(int, nodeinfo['memory_free']),
1465 a8083063 Iustin Pop
            "dtotal": utils.TryConvert(int, nodeinfo['vg_size']),
1466 a8083063 Iustin Pop
            "dfree": utils.TryConvert(int, nodeinfo['vg_free']),
1467 3ef10550 Michael Hanselmann
            "bootid": nodeinfo['bootid'],
1468 a8083063 Iustin Pop
            }
1469 a8083063 Iustin Pop
        else:
1470 a8083063 Iustin Pop
          live_data[name] = {}
1471 a8083063 Iustin Pop
    else:
1472 a8083063 Iustin Pop
      live_data = dict.fromkeys(nodenames, {})
1473 a8083063 Iustin Pop
1474 ec223efb Iustin Pop
    node_to_primary = dict([(name, set()) for name in nodenames])
1475 ec223efb Iustin Pop
    node_to_secondary = dict([(name, set()) for name in nodenames])
1476 a8083063 Iustin Pop
1477 ec223efb Iustin Pop
    inst_fields = frozenset(("pinst_cnt", "pinst_list",
1478 ec223efb Iustin Pop
                             "sinst_cnt", "sinst_list"))
1479 ec223efb Iustin Pop
    if inst_fields & frozenset(self.op.output_fields):
1480 a8083063 Iustin Pop
      instancelist = self.cfg.GetInstanceList()
1481 a8083063 Iustin Pop
1482 ec223efb Iustin Pop
      for instance_name in instancelist:
1483 ec223efb Iustin Pop
        inst = self.cfg.GetInstanceInfo(instance_name)
1484 ec223efb Iustin Pop
        if inst.primary_node in node_to_primary:
1485 ec223efb Iustin Pop
          node_to_primary[inst.primary_node].add(inst.name)
1486 ec223efb Iustin Pop
        for secnode in inst.secondary_nodes:
1487 ec223efb Iustin Pop
          if secnode in node_to_secondary:
1488 ec223efb Iustin Pop
            node_to_secondary[secnode].add(inst.name)
1489 a8083063 Iustin Pop
1490 a8083063 Iustin Pop
    # end data gathering
1491 a8083063 Iustin Pop
1492 a8083063 Iustin Pop
    output = []
1493 a8083063 Iustin Pop
    for node in nodelist:
1494 a8083063 Iustin Pop
      node_output = []
1495 a8083063 Iustin Pop
      for field in self.op.output_fields:
1496 a8083063 Iustin Pop
        if field == "name":
1497 a8083063 Iustin Pop
          val = node.name
1498 ec223efb Iustin Pop
        elif field == "pinst_list":
1499 ec223efb Iustin Pop
          val = list(node_to_primary[node.name])
1500 ec223efb Iustin Pop
        elif field == "sinst_list":
1501 ec223efb Iustin Pop
          val = list(node_to_secondary[node.name])
1502 ec223efb Iustin Pop
        elif field == "pinst_cnt":
1503 ec223efb Iustin Pop
          val = len(node_to_primary[node.name])
1504 ec223efb Iustin Pop
        elif field == "sinst_cnt":
1505 ec223efb Iustin Pop
          val = len(node_to_secondary[node.name])
1506 a8083063 Iustin Pop
        elif field == "pip":
1507 a8083063 Iustin Pop
          val = node.primary_ip
1508 a8083063 Iustin Pop
        elif field == "sip":
1509 a8083063 Iustin Pop
          val = node.secondary_ip
1510 a8083063 Iustin Pop
        elif field in self.dynamic_fields:
1511 ec223efb Iustin Pop
          val = live_data[node.name].get(field, None)
1512 a8083063 Iustin Pop
        else:
1513 3ecf6786 Iustin Pop
          raise errors.ParameterError(field)
1514 a8083063 Iustin Pop
        node_output.append(val)
1515 a8083063 Iustin Pop
      output.append(node_output)
1516 a8083063 Iustin Pop
1517 a8083063 Iustin Pop
    return output
1518 a8083063 Iustin Pop
1519 a8083063 Iustin Pop
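# --- Illustrative sketch, not part of the original module ---
# LUQueryNodes.Exec above answers the pinst_*/sinst_* fields by building
# two reverse indexes in one pass over the instance list: node name ->
# set of instances using it as primary, and node name -> set of instances
# using it as a secondary.  The standalone helper below shows that pass
# with plain tuples; the instance and node names in the usage comment are
# invented.
def _sketch_reverse_index(node_names, instances):
  """instances: iterable of (name, primary_node, [secondary_node, ...])."""
  to_primary = dict((name, set()) for name in node_names)
  to_secondary = dict((name, set()) for name in node_names)
  for iname, pnode, snodes in instances:
    if pnode in to_primary:
      to_primary[pnode].add(iname)
    for snode in snodes:
      if snode in to_secondary:
        to_secondary[snode].add(iname)
  return to_primary, to_secondary

# Example (assumed values):
#   _sketch_reverse_index(["node1", "node2"],
#                         [("inst1", "node1", ["node2"])])
#   -> ({'node1': set(['inst1']), 'node2': set()},
#       {'node1': set(), 'node2': set(['inst1'])})
# --- end of sketch ---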
1520 dcb93971 Michael Hanselmann
class LUQueryNodeVolumes(NoHooksLU):
1521 dcb93971 Michael Hanselmann
  """Logical unit for getting volumes on node(s).
1522 dcb93971 Michael Hanselmann

1523 dcb93971 Michael Hanselmann
  """
1524 dcb93971 Michael Hanselmann
  _OP_REQP = ["nodes", "output_fields"]
1525 dcb93971 Michael Hanselmann
1526 dcb93971 Michael Hanselmann
  def CheckPrereq(self):
1527 dcb93971 Michael Hanselmann
    """Check prerequisites.
1528 dcb93971 Michael Hanselmann

1529 dcb93971 Michael Hanselmann
    This checks that the fields required are valid output fields.
1530 dcb93971 Michael Hanselmann

1531 dcb93971 Michael Hanselmann
    """
1532 dcb93971 Michael Hanselmann
    self.nodes = _GetWantedNodes(self, self.op.nodes)
1533 dcb93971 Michael Hanselmann
1534 dcb93971 Michael Hanselmann
    _CheckOutputFields(static=["node"],
1535 dcb93971 Michael Hanselmann
                       dynamic=["phys", "vg", "name", "size", "instance"],
1536 dcb93971 Michael Hanselmann
                       selected=self.op.output_fields)
1537 dcb93971 Michael Hanselmann
1538 dcb93971 Michael Hanselmann
1539 dcb93971 Michael Hanselmann
  def Exec(self, feedback_fn):
1540 dcb93971 Michael Hanselmann
    """Computes the list of nodes and their attributes.
1541 dcb93971 Michael Hanselmann

1542 dcb93971 Michael Hanselmann
    """
1543 a7ba5e53 Iustin Pop
    nodenames = self.nodes
1544 dcb93971 Michael Hanselmann
    volumes = rpc.call_node_volumes(nodenames)
1545 dcb93971 Michael Hanselmann
1546 dcb93971 Michael Hanselmann
    ilist = [self.cfg.GetInstanceInfo(iname) for iname
1547 dcb93971 Michael Hanselmann
             in self.cfg.GetInstanceList()]
1548 dcb93971 Michael Hanselmann
1549 dcb93971 Michael Hanselmann
    lv_by_node = dict([(inst, inst.MapLVsByNode()) for inst in ilist])
1550 dcb93971 Michael Hanselmann
1551 dcb93971 Michael Hanselmann
    output = []
1552 dcb93971 Michael Hanselmann
    for node in nodenames:
1553 37d19eb2 Michael Hanselmann
      if node not in volumes or not volumes[node]:
1554 37d19eb2 Michael Hanselmann
        continue
1555 37d19eb2 Michael Hanselmann
1556 dcb93971 Michael Hanselmann
      node_vols = volumes[node][:]
1557 dcb93971 Michael Hanselmann
      node_vols.sort(key=lambda vol: vol['dev'])
1558 dcb93971 Michael Hanselmann
1559 dcb93971 Michael Hanselmann
      for vol in node_vols:
1560 dcb93971 Michael Hanselmann
        node_output = []
1561 dcb93971 Michael Hanselmann
        for field in self.op.output_fields:
1562 dcb93971 Michael Hanselmann
          if field == "node":
1563 dcb93971 Michael Hanselmann
            val = node
1564 dcb93971 Michael Hanselmann
          elif field == "phys":
1565 dcb93971 Michael Hanselmann
            val = vol['dev']
1566 dcb93971 Michael Hanselmann
          elif field == "vg":
1567 dcb93971 Michael Hanselmann
            val = vol['vg']
1568 dcb93971 Michael Hanselmann
          elif field == "name":
1569 dcb93971 Michael Hanselmann
            val = vol['name']
1570 dcb93971 Michael Hanselmann
          elif field == "size":
1571 dcb93971 Michael Hanselmann
            val = int(float(vol['size']))
1572 dcb93971 Michael Hanselmann
          elif field == "instance":
1573 dcb93971 Michael Hanselmann
            for inst in ilist:
1574 dcb93971 Michael Hanselmann
              if node not in lv_by_node[inst]:
1575 dcb93971 Michael Hanselmann
                continue
1576 dcb93971 Michael Hanselmann
              if vol['name'] in lv_by_node[inst][node]:
1577 dcb93971 Michael Hanselmann
                val = inst.name
1578 dcb93971 Michael Hanselmann
                break
1579 dcb93971 Michael Hanselmann
            else:
1580 dcb93971 Michael Hanselmann
              val = '-'
1581 dcb93971 Michael Hanselmann
          else:
1582 3ecf6786 Iustin Pop
            raise errors.ParameterError(field)
1583 dcb93971 Michael Hanselmann
          node_output.append(str(val))
1584 dcb93971 Michael Hanselmann
1585 dcb93971 Michael Hanselmann
        output.append(node_output)
1586 dcb93971 Michael Hanselmann
1587 dcb93971 Michael Hanselmann
    return output
1588 dcb93971 Michael Hanselmann
1589 dcb93971 Michael Hanselmann
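# --- Illustrative sketch, not part of the original module ---
# LUQueryNodeVolumes.Exec above attributes each reported LV to an
# instance by precomputing lv_by_node, a per-instance map of node ->
# [lv names], and then scanning it for the volume at hand (falling back
# to '-' when nothing matches).  The helper below shows that lookup on
# plain dicts; the names in the usage comment are invented.
def _sketch_owner_of_volume(lv_by_instance, node, lv_name):
  """lv_by_instance: {instance: {node: [lv, ...]}}; returns owner or '-'."""
  for instance, by_node in sorted(lv_by_instance.items()):
    if lv_name in by_node.get(node, []):
      return instance
  return '-'

# Example (assumed values):
#   lvs = {'inst1': {'node1': ['lv-data'], 'node2': ['lv-data']}}
#   _sketch_owner_of_volume(lvs, 'node1', 'lv-data') -> 'inst1'
#   _sketch_owner_of_volume(lvs, 'node1', 'lv-other') -> '-'
# --- end of sketch ---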
1590 a8083063 Iustin Pop
class LUAddNode(LogicalUnit):
1591 a8083063 Iustin Pop
  """Logical unit for adding node to the cluster.
1592 a8083063 Iustin Pop

1593 a8083063 Iustin Pop
  """
1594 a8083063 Iustin Pop
  HPATH = "node-add"
1595 a8083063 Iustin Pop
  HTYPE = constants.HTYPE_NODE
1596 a8083063 Iustin Pop
  _OP_REQP = ["node_name"]
1597 a8083063 Iustin Pop
1598 a8083063 Iustin Pop
  def BuildHooksEnv(self):
1599 a8083063 Iustin Pop
    """Build hooks env.
1600 a8083063 Iustin Pop

1601 a8083063 Iustin Pop
    This will run on all nodes before, and on all nodes + the new node after.
1602 a8083063 Iustin Pop

1603 a8083063 Iustin Pop
    """
1604 a8083063 Iustin Pop
    env = {
1605 0e137c28 Iustin Pop
      "OP_TARGET": self.op.node_name,
1606 a8083063 Iustin Pop
      "NODE_NAME": self.op.node_name,
1607 a8083063 Iustin Pop
      "NODE_PIP": self.op.primary_ip,
1608 a8083063 Iustin Pop
      "NODE_SIP": self.op.secondary_ip,
1609 a8083063 Iustin Pop
      }
1610 a8083063 Iustin Pop
    nodes_0 = self.cfg.GetNodeList()
1611 a8083063 Iustin Pop
    nodes_1 = nodes_0 + [self.op.node_name, ]
1612 a8083063 Iustin Pop
    return env, nodes_0, nodes_1
1613 a8083063 Iustin Pop
1614 a8083063 Iustin Pop
  def CheckPrereq(self):
1615 a8083063 Iustin Pop
    """Check prerequisites.
1616 a8083063 Iustin Pop

1617 a8083063 Iustin Pop
    This checks:
1618 a8083063 Iustin Pop
     - the new node is not already in the config
1619 a8083063 Iustin Pop
     - it is resolvable
1620 a8083063 Iustin Pop
     - its parameters (single/dual homed) matches the cluster
1621 a8083063 Iustin Pop

1622 a8083063 Iustin Pop
    Any errors are signalled by raising errors.OpPrereqError.
1623 a8083063 Iustin Pop

1624 a8083063 Iustin Pop
    """
1625 a8083063 Iustin Pop
    node_name = self.op.node_name
1626 a8083063 Iustin Pop
    cfg = self.cfg
1627 a8083063 Iustin Pop
1628 89e1fc26 Iustin Pop
    dns_data = utils.HostInfo(node_name)
1629 a8083063 Iustin Pop
1630 bcf043c9 Iustin Pop
    node = dns_data.name
1631 bcf043c9 Iustin Pop
    primary_ip = self.op.primary_ip = dns_data.ip
1632 a8083063 Iustin Pop
    secondary_ip = getattr(self.op, "secondary_ip", None)
1633 a8083063 Iustin Pop
    if secondary_ip is None:
1634 a8083063 Iustin Pop
      secondary_ip = primary_ip
1635 a8083063 Iustin Pop
    if not utils.IsValidIP(secondary_ip):
1636 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("Invalid secondary IP given")
1637 a8083063 Iustin Pop
    self.op.secondary_ip = secondary_ip
1638 e7c6e02b Michael Hanselmann
1639 a8083063 Iustin Pop
    node_list = cfg.GetNodeList()
1640 e7c6e02b Michael Hanselmann
    if not self.op.readd and node in node_list:
1641 e7c6e02b Michael Hanselmann
      raise errors.OpPrereqError("Node %s is already in the configuration" %
1642 e7c6e02b Michael Hanselmann
                                 node)
1643 e7c6e02b Michael Hanselmann
    elif self.op.readd and node not in node_list:
1644 e7c6e02b Michael Hanselmann
      raise errors.OpPrereqError("Node %s is not in the configuration" % node)
1645 a8083063 Iustin Pop
1646 a8083063 Iustin Pop
    for existing_node_name in node_list:
1647 a8083063 Iustin Pop
      existing_node = cfg.GetNodeInfo(existing_node_name)
1648 e7c6e02b Michael Hanselmann
1649 e7c6e02b Michael Hanselmann
      if self.op.readd and node == existing_node_name:
1650 e7c6e02b Michael Hanselmann
        if (existing_node.primary_ip != primary_ip or
1651 e7c6e02b Michael Hanselmann
            existing_node.secondary_ip != secondary_ip):
1652 e7c6e02b Michael Hanselmann
          raise errors.OpPrereqError("Readded node doesn't have the same IP"
1653 e7c6e02b Michael Hanselmann
                                     " address configuration as before")
1654 e7c6e02b Michael Hanselmann
        continue
1655 e7c6e02b Michael Hanselmann
1656 a8083063 Iustin Pop
      if (existing_node.primary_ip == primary_ip or
1657 a8083063 Iustin Pop
          existing_node.secondary_ip == primary_ip or
1658 a8083063 Iustin Pop
          existing_node.primary_ip == secondary_ip or
1659 a8083063 Iustin Pop
          existing_node.secondary_ip == secondary_ip):
1660 3ecf6786 Iustin Pop
        raise errors.OpPrereqError("New node ip address(es) conflict with"
1661 3ecf6786 Iustin Pop
                                   " existing node %s" % existing_node.name)
1662 a8083063 Iustin Pop
1663 a8083063 Iustin Pop
    # check that the type of the node (single versus dual homed) is the
1664 a8083063 Iustin Pop
    # same as for the master
1665 880478f8 Iustin Pop
    myself = cfg.GetNodeInfo(self.sstore.GetMasterNode())
1666 a8083063 Iustin Pop
    master_singlehomed = myself.secondary_ip == myself.primary_ip
1667 a8083063 Iustin Pop
    newbie_singlehomed = secondary_ip == primary_ip
1668 a8083063 Iustin Pop
    if master_singlehomed != newbie_singlehomed:
1669 a8083063 Iustin Pop
      if master_singlehomed:
1670 3ecf6786 Iustin Pop
        raise errors.OpPrereqError("The master has no private ip but the"
1671 3ecf6786 Iustin Pop
                                   " new node has one")
1672 a8083063 Iustin Pop
      else:
1673 3ecf6786 Iustin Pop
        raise errors.OpPrereqError("The master has a private ip but the"
1674 3ecf6786 Iustin Pop
                                   " new node doesn't have one")
1675 a8083063 Iustin Pop
1676 a8083063 Iustin Pop
    # checks reachability
1677 b15d625f Iustin Pop
    if not utils.TcpPing(primary_ip, constants.DEFAULT_NODED_PORT):
1678 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("Node not reachable by ping")
1679 a8083063 Iustin Pop
1680 a8083063 Iustin Pop
    if not newbie_singlehomed:
1681 a8083063 Iustin Pop
      # check reachability from my secondary ip to newbie's secondary ip
1682 b15d625f Iustin Pop
      if not utils.TcpPing(secondary_ip, constants.DEFAULT_NODED_PORT,
1683 b15d625f Iustin Pop
                           source=myself.secondary_ip):
1684 f4bc1f2c Michael Hanselmann
        raise errors.OpPrereqError("Node secondary ip not reachable by TCP"
1685 f4bc1f2c Michael Hanselmann
                                   " based ping to noded port")
1686 a8083063 Iustin Pop
1687 a8083063 Iustin Pop
    self.new_node = objects.Node(name=node,
1688 a8083063 Iustin Pop
                                 primary_ip=primary_ip,
1689 a8083063 Iustin Pop
                                 secondary_ip=secondary_ip)
1690 a8083063 Iustin Pop
1691 2a6469d5 Alexander Schreiber
    if self.sstore.GetHypervisorType() == constants.HT_XEN_HVM31:
1692 2a6469d5 Alexander Schreiber
      if not os.path.exists(constants.VNC_PASSWORD_FILE):
1693 2a6469d5 Alexander Schreiber
        raise errors.OpPrereqError("Cluster VNC password file %s missing" %
1694 2a6469d5 Alexander Schreiber
                                   constants.VNC_PASSWORD_FILE)
1695 2a6469d5 Alexander Schreiber
1696 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
1697 a8083063 Iustin Pop
    """Adds the new node to the cluster.
1698 a8083063 Iustin Pop

1699 a8083063 Iustin Pop
    """
1700 a8083063 Iustin Pop
    new_node = self.new_node
1701 a8083063 Iustin Pop
    node = new_node.name
1702 a8083063 Iustin Pop
1703 a8083063 Iustin Pop
    # set up inter-node password and certificate and restart the node daemon
1704 a8083063 Iustin Pop
    gntpass = self.sstore.GetNodeDaemonPassword()
1705 a8083063 Iustin Pop
    if not re.match('^[a-zA-Z0-9.]{1,64}$', gntpass):
1706 3ecf6786 Iustin Pop
      raise errors.OpExecError("ganeti password corruption detected")
1707 a8083063 Iustin Pop
    f = open(constants.SSL_CERT_FILE)
1708 a8083063 Iustin Pop
    try:
1709 a8083063 Iustin Pop
      gntpem = f.read(8192)
1710 a8083063 Iustin Pop
    finally:
1711 a8083063 Iustin Pop
      f.close()
1712 a8083063 Iustin Pop
    # in the base64 pem encoding, neither '!' nor '.' are valid chars,
1713 a8083063 Iustin Pop
    # so we use this to detect an invalid certificate; as long as the
1714 a8083063 Iustin Pop
    # cert doesn't contain this, the here-document will be correctly
1715 a8083063 Iustin Pop
    # parsed by the shell sequence below
1716 a8083063 Iustin Pop
    if re.search('^!EOF\.', gntpem, re.MULTILINE):
1717 3ecf6786 Iustin Pop
      raise errors.OpExecError("invalid PEM encoding in the SSL certificate")
1718 a8083063 Iustin Pop
    if not gntpem.endswith("\n"):
1719 3ecf6786 Iustin Pop
      raise errors.OpExecError("PEM must end with newline")
1720 a8083063 Iustin Pop
    logger.Info("copy cluster pass to %s and starting the node daemon" % node)
1721 a8083063 Iustin Pop
1722 a8083063 Iustin Pop
    # and then connect with ssh to set password and start ganeti-noded
1723 a8083063 Iustin Pop
    # note that all the below variables are sanitized at this point,
1724 a8083063 Iustin Pop
    # either by being constants or by the checks above
1725 a8083063 Iustin Pop
    ss = self.sstore
1726 a8083063 Iustin Pop
    mycommand = ("umask 077 && "
1727 a8083063 Iustin Pop
                 "echo '%s' > '%s' && "
1728 a8083063 Iustin Pop
                 "cat > '%s' << '!EOF.' && \n"
1729 a8083063 Iustin Pop
                 "%s!EOF.\n%s restart" %
1730 a8083063 Iustin Pop
                 (gntpass, ss.KeyToFilename(ss.SS_NODED_PASS),
1731 a8083063 Iustin Pop
                  constants.SSL_CERT_FILE, gntpem,
1732 a8083063 Iustin Pop
                  constants.NODE_INITD_SCRIPT))
1733 a8083063 Iustin Pop
1734 c92b310a Michael Hanselmann
    result = self.ssh.Run(node, 'root', mycommand, batch=False, ask_key=True)
1735 a8083063 Iustin Pop
    if result.failed:
1736 3ecf6786 Iustin Pop
      raise errors.OpExecError("Remote command on node %s, error: %s,"
1737 3ecf6786 Iustin Pop
                               " output: %s" %
1738 3ecf6786 Iustin Pop
                               (node, result.fail_reason, result.output))
1739 a8083063 Iustin Pop
1740 a8083063 Iustin Pop
    # check connectivity
1741 a8083063 Iustin Pop
    time.sleep(4)
1742 a8083063 Iustin Pop
1743 a8083063 Iustin Pop
    result = rpc.call_version([node])[node]
1744 a8083063 Iustin Pop
    if result:
1745 a8083063 Iustin Pop
      if constants.PROTOCOL_VERSION == result:
1746 a8083063 Iustin Pop
        logger.Info("communication to node %s fine, sw version %s match" %
1747 a8083063 Iustin Pop
                    (node, result))
1748 a8083063 Iustin Pop
      else:
1749 3ecf6786 Iustin Pop
        raise errors.OpExecError("Version mismatch master version %s,"
1750 3ecf6786 Iustin Pop
                                 " node version %s" %
1751 3ecf6786 Iustin Pop
                                 (constants.PROTOCOL_VERSION, result))
1752 a8083063 Iustin Pop
    else:
1753 3ecf6786 Iustin Pop
      raise errors.OpExecError("Cannot get version from the new node")
1754 a8083063 Iustin Pop
1755 a8083063 Iustin Pop
    # setup ssh on node
1756 a8083063 Iustin Pop
    logger.Info("copy ssh key to node %s" % node)
1757 70d9e3d8 Iustin Pop
    priv_key, pub_key, _ = ssh.GetUserFiles(constants.GANETI_RUNAS)
1758 a8083063 Iustin Pop
    keyarray = []
1759 70d9e3d8 Iustin Pop
    keyfiles = [constants.SSH_HOST_DSA_PRIV, constants.SSH_HOST_DSA_PUB,
1760 70d9e3d8 Iustin Pop
                constants.SSH_HOST_RSA_PRIV, constants.SSH_HOST_RSA_PUB,
1761 70d9e3d8 Iustin Pop
                priv_key, pub_key]
1762 a8083063 Iustin Pop
1763 a8083063 Iustin Pop
    for i in keyfiles:
1764 a8083063 Iustin Pop
      f = open(i, 'r')
1765 a8083063 Iustin Pop
      try:
1766 a8083063 Iustin Pop
        keyarray.append(f.read())
1767 a8083063 Iustin Pop
      finally:
1768 a8083063 Iustin Pop
        f.close()
1769 a8083063 Iustin Pop
1770 a8083063 Iustin Pop
    result = rpc.call_node_add(node, keyarray[0], keyarray[1], keyarray[2],
1771 a8083063 Iustin Pop
                               keyarray[3], keyarray[4], keyarray[5])
1772 a8083063 Iustin Pop
1773 a8083063 Iustin Pop
    if not result:
1774 3ecf6786 Iustin Pop
      raise errors.OpExecError("Cannot transfer ssh keys to the new node")
1775 a8083063 Iustin Pop
1776 a8083063 Iustin Pop
    # Add node to our /etc/hosts, and add key to known_hosts
1777 9440aeab Michael Hanselmann
    _AddHostToEtcHosts(new_node.name)
1778 c8a0948f Michael Hanselmann
1779 a8083063 Iustin Pop
    if new_node.secondary_ip != new_node.primary_ip:
1780 16abfbc2 Alexander Schreiber
      if not rpc.call_node_tcp_ping(new_node.name,
1781 16abfbc2 Alexander Schreiber
                                    constants.LOCALHOST_IP_ADDRESS,
1782 16abfbc2 Alexander Schreiber
                                    new_node.secondary_ip,
1783 16abfbc2 Alexander Schreiber
                                    constants.DEFAULT_NODED_PORT,
1784 16abfbc2 Alexander Schreiber
                                    10, False):
1785 f4bc1f2c Michael Hanselmann
        raise errors.OpExecError("Node claims it doesn't have the secondary ip"
1786 f4bc1f2c Michael Hanselmann
                                 " you gave (%s). Please fix and re-run this"
1787 f4bc1f2c Michael Hanselmann
                                 " command." % new_node.secondary_ip)
1788 a8083063 Iustin Pop
1789 c92b310a Michael Hanselmann
    success, msg = self.ssh.VerifyNodeHostname(node)
1790 ff98055b Iustin Pop
    if not success:
1791 ff98055b Iustin Pop
      raise errors.OpExecError("Node '%s' claims it has a different hostname"
1792 f4bc1f2c Michael Hanselmann
                               " than the one the resolver gives: %s."
1793 f4bc1f2c Michael Hanselmann
                               " Please fix and re-run this command." %
1794 ff98055b Iustin Pop
                               (node, msg))
1795 ff98055b Iustin Pop
1796 a8083063 Iustin Pop
    # Distribute updated /etc/hosts and known_hosts to all nodes,
1797 a8083063 Iustin Pop
    # including the node just added
1798 880478f8 Iustin Pop
    myself = self.cfg.GetNodeInfo(self.sstore.GetMasterNode())
1799 a8083063 Iustin Pop
    dist_nodes = self.cfg.GetNodeList() + [node]
1800 a8083063 Iustin Pop
    if myself.name in dist_nodes:
1801 a8083063 Iustin Pop
      dist_nodes.remove(myself.name)
1802 a8083063 Iustin Pop
1803 a8083063 Iustin Pop
    logger.Debug("Copying hosts and known_hosts to all nodes")
1804 107711b0 Michael Hanselmann
    for fname in (constants.ETC_HOSTS, constants.SSH_KNOWN_HOSTS_FILE):
1805 a8083063 Iustin Pop
      result = rpc.call_upload_file(dist_nodes, fname)
1806 a8083063 Iustin Pop
      for to_node in dist_nodes:
1807 a8083063 Iustin Pop
        if not result[to_node]:
1808 a8083063 Iustin Pop
          logger.Error("copy of file %s to node %s failed" %
1809 a8083063 Iustin Pop
                       (fname, to_node))
1810 a8083063 Iustin Pop
1811 cb91d46e Iustin Pop
    to_copy = ss.GetFileList()
1812 2a6469d5 Alexander Schreiber
    if self.sstore.GetHypervisorType() == constants.HT_XEN_HVM31:
1813 2a6469d5 Alexander Schreiber
      to_copy.append(constants.VNC_PASSWORD_FILE)
1814 a8083063 Iustin Pop
    for fname in to_copy:
1815 c92b310a Michael Hanselmann
      if not self.ssh.CopyFileToNode(node, fname):
1816 a8083063 Iustin Pop
        logger.Error("could not copy file %s to node %s" % (fname, node))
1817 a8083063 Iustin Pop
1818 e7c6e02b Michael Hanselmann
    if not self.op.readd:
1819 e7c6e02b Michael Hanselmann
      logger.Info("adding node %s to cluster.conf" % node)
1820 e7c6e02b Michael Hanselmann
      self.cfg.AddNode(new_node)
1821 a8083063 Iustin Pop
1822 a8083063 Iustin Pop
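# --- Illustrative sketch, not part of the original module ---
# LUAddNode.Exec above ships the SSL certificate to the new node inside a
# shell here-document terminated by the sentinel '!EOF.', so it rejects
# any PEM blob that starts a line with that sentinel (it would terminate
# the here-document early) or that does not end in a newline.  The helper
# below reproduces just that validation as a standalone function; the
# default sentinel mirrors the one used above.
import re

def _sketch_check_heredoc_safe(pem_data, sentinel="!EOF."):
  """Return True if pem_data can be embedded in a <<sentinel here-doc."""
  if not pem_data.endswith("\n"):
    return False
  return not re.search("^" + re.escape(sentinel), pem_data, re.MULTILINE)

# Example (assumed values):
#   _sketch_check_heredoc_safe("-----BEGIN CERTIFICATE-----\nabc\n") -> True
#   _sketch_check_heredoc_safe("!EOF.\nrest\n") -> False
# --- end of sketch ---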
1823 a8083063 Iustin Pop
class LUMasterFailover(LogicalUnit):
1824 a8083063 Iustin Pop
  """Failover the master node to the current node.
1825 a8083063 Iustin Pop

1826 a8083063 Iustin Pop
  This is a special LU in that it must run on a non-master node.
1827 a8083063 Iustin Pop

1828 a8083063 Iustin Pop
  """
1829 a8083063 Iustin Pop
  HPATH = "master-failover"
1830 a8083063 Iustin Pop
  HTYPE = constants.HTYPE_CLUSTER
1831 a8083063 Iustin Pop
  REQ_MASTER = False
1832 a8083063 Iustin Pop
  _OP_REQP = []
1833 a8083063 Iustin Pop
1834 a8083063 Iustin Pop
  def BuildHooksEnv(self):
1835 a8083063 Iustin Pop
    """Build hooks env.
1836 a8083063 Iustin Pop

1837 a8083063 Iustin Pop
    This will run on the new master only in the pre phase, and on all
1838 a8083063 Iustin Pop
    the nodes in the post phase.
1839 a8083063 Iustin Pop

1840 a8083063 Iustin Pop
    """
1841 a8083063 Iustin Pop
    env = {
1842 0e137c28 Iustin Pop
      "OP_TARGET": self.new_master,
1843 a8083063 Iustin Pop
      "NEW_MASTER": self.new_master,
1844 a8083063 Iustin Pop
      "OLD_MASTER": self.old_master,
1845 a8083063 Iustin Pop
      }
1846 a8083063 Iustin Pop
    return env, [self.new_master], self.cfg.GetNodeList()
1847 a8083063 Iustin Pop
1848 a8083063 Iustin Pop
  def CheckPrereq(self):
1849 a8083063 Iustin Pop
    """Check prerequisites.
1850 a8083063 Iustin Pop

1851 a8083063 Iustin Pop
    This checks that we are not already the master.
1852 a8083063 Iustin Pop

1853 a8083063 Iustin Pop
    """
1854 89e1fc26 Iustin Pop
    self.new_master = utils.HostInfo().name
1855 880478f8 Iustin Pop
    self.old_master = self.sstore.GetMasterNode()
1856 a8083063 Iustin Pop
1857 a8083063 Iustin Pop
    if self.old_master == self.new_master:
1858 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("This commands must be run on the node"
1859 f4bc1f2c Michael Hanselmann
                                 " where you want the new master to be."
1860 f4bc1f2c Michael Hanselmann
                                 " %s is already the master" %
1861 3ecf6786 Iustin Pop
                                 self.old_master)
1862 a8083063 Iustin Pop
1863 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
1864 a8083063 Iustin Pop
    """Failover the master node.
1865 a8083063 Iustin Pop

1866 a8083063 Iustin Pop
    This command, when run on a non-master node, will cause the current
1867 a8083063 Iustin Pop
    master to cease being master, and the non-master to become new
1868 a8083063 Iustin Pop
    master.
1869 a8083063 Iustin Pop

1870 a8083063 Iustin Pop
    """
1871 a8083063 Iustin Pop
    #TODO: do not rely on gethostname returning the FQDN
1872 a8083063 Iustin Pop
    logger.Info("setting master to %s, old master: %s" %
1873 a8083063 Iustin Pop
                (self.new_master, self.old_master))
1874 a8083063 Iustin Pop
1875 a8083063 Iustin Pop
    if not rpc.call_node_stop_master(self.old_master):
1876 a8083063 Iustin Pop
      logger.Error("could disable the master role on the old master"
1877 a8083063 Iustin Pop
                   " %s, please disable manually" % self.old_master)
1878 a8083063 Iustin Pop
1879 880478f8 Iustin Pop
    ss = self.sstore
1880 880478f8 Iustin Pop
    ss.SetKey(ss.SS_MASTER_NODE, self.new_master)
1881 880478f8 Iustin Pop
    if not rpc.call_upload_file(self.cfg.GetNodeList(),
1882 880478f8 Iustin Pop
                                ss.KeyToFilename(ss.SS_MASTER_NODE)):
1883 880478f8 Iustin Pop
      logger.Error("could not distribute the new simple store master file"
1884 880478f8 Iustin Pop
                   " to the other nodes, please check.")
1885 880478f8 Iustin Pop
1886 a8083063 Iustin Pop
    if not rpc.call_node_start_master(self.new_master):
1887 a8083063 Iustin Pop
      logger.Error("could not start the master role on the new master"
1888 a8083063 Iustin Pop
                   " %s, please check" % self.new_master)
1889 f4bc1f2c Michael Hanselmann
      feedback_fn("Error in activating the master IP on the new master,"
1890 f4bc1f2c Michael Hanselmann
                  " please fix manually.")
1891 a8083063 Iustin Pop
1892 a8083063 Iustin Pop
1893 a8083063 Iustin Pop
1894 a8083063 Iustin Pop
class LUQueryClusterInfo(NoHooksLU):
1895 a8083063 Iustin Pop
  """Query cluster configuration.
1896 a8083063 Iustin Pop

1897 a8083063 Iustin Pop
  """
1898 a8083063 Iustin Pop
  _OP_REQP = []
1899 59322403 Iustin Pop
  REQ_MASTER = False
1900 a8083063 Iustin Pop
1901 a8083063 Iustin Pop
  def CheckPrereq(self):
1902 a8083063 Iustin Pop
    """No prerequsites needed for this LU.
1903 a8083063 Iustin Pop

1904 a8083063 Iustin Pop
    """
1905 a8083063 Iustin Pop
    pass
1906 a8083063 Iustin Pop
1907 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
1908 a8083063 Iustin Pop
    """Return cluster config.
1909 a8083063 Iustin Pop

1910 a8083063 Iustin Pop
    """
1911 a8083063 Iustin Pop
    result = {
1912 5fcdc80d Iustin Pop
      "name": self.sstore.GetClusterName(),
1913 a8083063 Iustin Pop
      "software_version": constants.RELEASE_VERSION,
1914 a8083063 Iustin Pop
      "protocol_version": constants.PROTOCOL_VERSION,
1915 a8083063 Iustin Pop
      "config_version": constants.CONFIG_VERSION,
1916 a8083063 Iustin Pop
      "os_api_version": constants.OS_API_VERSION,
1917 a8083063 Iustin Pop
      "export_version": constants.EXPORT_VERSION,
1918 880478f8 Iustin Pop
      "master": self.sstore.GetMasterNode(),
1919 a8083063 Iustin Pop
      "architecture": (platform.architecture()[0], platform.machine()),
1920 a8083063 Iustin Pop
      }
1921 a8083063 Iustin Pop
1922 a8083063 Iustin Pop
    return result
1923 a8083063 Iustin Pop
1924 a8083063 Iustin Pop
1925 a8083063 Iustin Pop
class LUClusterCopyFile(NoHooksLU):
1926 a8083063 Iustin Pop
  """Copy file to cluster.
1927 a8083063 Iustin Pop

1928 a8083063 Iustin Pop
  """
1929 a8083063 Iustin Pop
  _OP_REQP = ["nodes", "filename"]
1930 a8083063 Iustin Pop
1931 a8083063 Iustin Pop
  def CheckPrereq(self):
1932 a8083063 Iustin Pop
    """Check prerequisites.
1933 a8083063 Iustin Pop

1934 a8083063 Iustin Pop
    It should check that the named file exists and that the given list
1935 a8083063 Iustin Pop
    of nodes is valid.
1936 a8083063 Iustin Pop

1937 a8083063 Iustin Pop
    """
1938 a8083063 Iustin Pop
    if not os.path.exists(self.op.filename):
1939 a8083063 Iustin Pop
      raise errors.OpPrereqError("No such filename '%s'" % self.op.filename)
1940 dcb93971 Michael Hanselmann
1941 dcb93971 Michael Hanselmann
    self.nodes = _GetWantedNodes(self, self.op.nodes)
1942 a8083063 Iustin Pop
1943 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
1944 a8083063 Iustin Pop
    """Copy a file from master to some nodes.
1945 a8083063 Iustin Pop

1946 a8083063 Iustin Pop
    Args:
1947 a8083063 Iustin Pop
      feedback_fn - function used to report progress (not used here)
1948 a8083063 Iustin Pop
    Opcode attributes used:
1949 a8083063 Iustin Pop
      filename - the name of the file to copy from the master
1950 a8083063 Iustin Pop
      nodes - list containing the name of target nodes; if empty, all nodes
1951 a8083063 Iustin Pop

1952 a8083063 Iustin Pop
    """
1953 a8083063 Iustin Pop
    filename = self.op.filename
1954 a8083063 Iustin Pop
1955 89e1fc26 Iustin Pop
    myname = utils.HostInfo().name
1956 a8083063 Iustin Pop
1957 a7ba5e53 Iustin Pop
    for node in self.nodes:
1958 a8083063 Iustin Pop
      if node == myname:
1959 a8083063 Iustin Pop
        continue
1960 c92b310a Michael Hanselmann
      if not self.ssh.CopyFileToNode(node, filename):
1961 a8083063 Iustin Pop
        logger.Error("Copy of file %s to node %s failed" % (filename, node))
1962 a8083063 Iustin Pop
1963 a8083063 Iustin Pop
1964 a8083063 Iustin Pop
class LUDumpClusterConfig(NoHooksLU):
1965 a8083063 Iustin Pop
  """Return a text-representation of the cluster-config.
1966 a8083063 Iustin Pop

1967 a8083063 Iustin Pop
  """
1968 a8083063 Iustin Pop
  _OP_REQP = []
1969 a8083063 Iustin Pop
1970 a8083063 Iustin Pop
  def CheckPrereq(self):
1971 a8083063 Iustin Pop
    """No prerequisites.
1972 a8083063 Iustin Pop

1973 a8083063 Iustin Pop
    """
1974 a8083063 Iustin Pop
    pass
1975 a8083063 Iustin Pop
1976 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
1977 a8083063 Iustin Pop
    """Dump a representation of the cluster config to the standard output.
1978 a8083063 Iustin Pop

1979 a8083063 Iustin Pop
    """
1980 a8083063 Iustin Pop
    return self.cfg.DumpConfig()
1981 a8083063 Iustin Pop
1982 a8083063 Iustin Pop
1983 a8083063 Iustin Pop
class LURunClusterCommand(NoHooksLU):
1984 a8083063 Iustin Pop
  """Run a command on some nodes.
1985 a8083063 Iustin Pop

1986 a8083063 Iustin Pop
  """
1987 a8083063 Iustin Pop
  _OP_REQP = ["command", "nodes"]
1988 a8083063 Iustin Pop
1989 a8083063 Iustin Pop
  def CheckPrereq(self):
1990 a8083063 Iustin Pop
    """Check prerequisites.
1991 a8083063 Iustin Pop

1992 a8083063 Iustin Pop
    It checks that the given list of nodes is valid.
1993 a8083063 Iustin Pop

1994 a8083063 Iustin Pop
    """
1995 dcb93971 Michael Hanselmann
    self.nodes = _GetWantedNodes(self, self.op.nodes)
1996 a8083063 Iustin Pop
1997 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
1998 a8083063 Iustin Pop
    """Run a command on some nodes.
1999 a8083063 Iustin Pop

2000 a8083063 Iustin Pop
    """
2001 5f83e263 Iustin Pop
    # put the master at the end of the nodes list
2002 5f83e263 Iustin Pop
    master_node = self.sstore.GetMasterNode()
2003 5f83e263 Iustin Pop
    if master_node in self.nodes:
2004 5f83e263 Iustin Pop
      self.nodes.remove(master_node)
2005 5f83e263 Iustin Pop
      self.nodes.append(master_node)
2006 5f83e263 Iustin Pop
2007 a8083063 Iustin Pop
    data = []
2008 a8083063 Iustin Pop
    for node in self.nodes:
2009 c92b310a Michael Hanselmann
      result = self.ssh.Run(node, "root", self.op.command)
2010 a7ba5e53 Iustin Pop
      data.append((node, result.output, result.exit_code))
2011 a8083063 Iustin Pop
2012 a8083063 Iustin Pop
    return data
2013 a8083063 Iustin Pop
2014 a8083063 Iustin Pop
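# Illustrative note (not part of the original module; values are hypothetical):
# LURunClusterCommand.Exec above returns one (node, output, exit_code) tuple
# per node, with the master node moved to the end of the list, e.g.
#   [("node2.example.com", "Linux\n", 0), ("node1.example.com", "Linux\n", 0)]
# where node1.example.com is assumed to be the master.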
2015 a8083063 Iustin Pop
class LUActivateInstanceDisks(NoHooksLU):
2016 a8083063 Iustin Pop
  """Bring up an instance's disks.
2017 a8083063 Iustin Pop

2018 a8083063 Iustin Pop
  """
2019 a8083063 Iustin Pop
  _OP_REQP = ["instance_name"]
2020 a8083063 Iustin Pop
2021 a8083063 Iustin Pop
  def CheckPrereq(self):
2022 a8083063 Iustin Pop
    """Check prerequisites.
2023 a8083063 Iustin Pop

2024 a8083063 Iustin Pop
    This checks that the instance is in the cluster.
2025 a8083063 Iustin Pop

2026 a8083063 Iustin Pop
    """
2027 a8083063 Iustin Pop
    instance = self.cfg.GetInstanceInfo(
2028 a8083063 Iustin Pop
      self.cfg.ExpandInstanceName(self.op.instance_name))
2029 a8083063 Iustin Pop
    if instance is None:
2030 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("Instance '%s' not known" %
2031 3ecf6786 Iustin Pop
                                 self.op.instance_name)
2032 a8083063 Iustin Pop
    self.instance = instance
2033 a8083063 Iustin Pop
2034 a8083063 Iustin Pop
2035 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
2036 a8083063 Iustin Pop
    """Activate the disks.
2037 a8083063 Iustin Pop

2038 a8083063 Iustin Pop
    """
2039 a8083063 Iustin Pop
    disks_ok, disks_info = _AssembleInstanceDisks(self.instance, self.cfg)
2040 a8083063 Iustin Pop
    if not disks_ok:
2041 3ecf6786 Iustin Pop
      raise errors.OpExecError("Cannot activate block devices")
2042 a8083063 Iustin Pop
2043 a8083063 Iustin Pop
    return disks_info
2044 a8083063 Iustin Pop
2045 a8083063 Iustin Pop
2046 a8083063 Iustin Pop
def _AssembleInstanceDisks(instance, cfg, ignore_secondaries=False):
2047 a8083063 Iustin Pop
  """Prepare the block devices for an instance.
2048 a8083063 Iustin Pop

2049 a8083063 Iustin Pop
  This sets up the block devices on all nodes.
2050 a8083063 Iustin Pop

2051 a8083063 Iustin Pop
  Args:
2052 a8083063 Iustin Pop
    instance: a ganeti.objects.Instance object
2053 a8083063 Iustin Pop
    ignore_secondaries: if true, errors on secondary nodes won't result
2054 a8083063 Iustin Pop
                        in an error return from the function
2055 a8083063 Iustin Pop

2056 a8083063 Iustin Pop
  Returns:
2057 a8083063 Iustin Pop
    a tuple (disks_ok, device_info): disks_ok is false if the operation
2058 a8083063 Iustin Pop
    failed; device_info is a list of (host, instance_visible_name,
2059 a8083063 Iustin Pop
    node_visible_name) tuples mapping node devices to instance devices
2060 a8083063 Iustin Pop
  """
2061 a8083063 Iustin Pop
  device_info = []
2062 a8083063 Iustin Pop
  disks_ok = True
2063 fdbd668d Iustin Pop
  iname = instance.name
2064 fdbd668d Iustin Pop
  # With the two passes mechanism we try to reduce the window of
2065 fdbd668d Iustin Pop
  # opportunity for the race condition of switching DRBD to primary
2066 fdbd668d Iustin Pop
  # before handshaking occurred, but we do not eliminate it
2067 fdbd668d Iustin Pop
2068 fdbd668d Iustin Pop
  # The proper fix would be to wait (with some limits) until the
2069 fdbd668d Iustin Pop
  # connection has been made and drbd transitions from WFConnection
2070 fdbd668d Iustin Pop
  # into any other network-connected state (Connected, SyncTarget,
2071 fdbd668d Iustin Pop
  # SyncSource, etc.)
2072 fdbd668d Iustin Pop
2073 fdbd668d Iustin Pop
  # 1st pass, assemble on all nodes in secondary mode
2074 a8083063 Iustin Pop
  for inst_disk in instance.disks:
2075 a8083063 Iustin Pop
    for node, node_disk in inst_disk.ComputeNodeTree(instance.primary_node):
2076 a8083063 Iustin Pop
      cfg.SetDiskID(node_disk, node)
2077 fdbd668d Iustin Pop
      result = rpc.call_blockdev_assemble(node, node_disk, iname, False)
2078 a8083063 Iustin Pop
      if not result:
2079 f4bc1f2c Michael Hanselmann
        logger.Error("could not prepare block device %s on node %s"
2080 fdbd668d Iustin Pop
                     " (is_primary=False, pass=1)" % (inst_disk.iv_name, node))
2081 fdbd668d Iustin Pop
        if not ignore_secondaries:
2082 a8083063 Iustin Pop
          disks_ok = False
2083 fdbd668d Iustin Pop
2084 fdbd668d Iustin Pop
  # FIXME: race condition on drbd migration to primary
2085 fdbd668d Iustin Pop
2086 fdbd668d Iustin Pop
  # 2nd pass, do only the primary node
2087 fdbd668d Iustin Pop
  for inst_disk in instance.disks:
2088 fdbd668d Iustin Pop
    for node, node_disk in inst_disk.ComputeNodeTree(instance.primary_node):
2089 fdbd668d Iustin Pop
      if node != instance.primary_node:
2090 fdbd668d Iustin Pop
        continue
2091 fdbd668d Iustin Pop
      cfg.SetDiskID(node_disk, node)
2092 fdbd668d Iustin Pop
      result = rpc.call_blockdev_assemble(node, node_disk, iname, True)
2093 fdbd668d Iustin Pop
      if not result:
2094 fdbd668d Iustin Pop
        logger.Error("could not prepare block device %s on node %s"
2095 fdbd668d Iustin Pop
                     " (is_primary=True, pass=2)" % (inst_disk.iv_name, node))
2096 fdbd668d Iustin Pop
        disks_ok = False
2097 fdbd668d Iustin Pop
    device_info.append((instance.primary_node, inst_disk.iv_name, result))
2098 a8083063 Iustin Pop
2099 b352ab5b Iustin Pop
  # leave the disks configured for the primary node
2100 b352ab5b Iustin Pop
  # this is a workaround that would be fixed better by
2101 b352ab5b Iustin Pop
  # improving the logical/physical id handling
2102 b352ab5b Iustin Pop
  for disk in instance.disks:
2103 b352ab5b Iustin Pop
    cfg.SetDiskID(disk, instance.primary_node)
2104 b352ab5b Iustin Pop
2105 a8083063 Iustin Pop
  return disks_ok, device_info
2106 a8083063 Iustin Pop
2107 a8083063 Iustin Pop
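# Illustrative note (not part of the original module; values are hypothetical):
# for a single-disk DRBD instance a successful call to _AssembleInstanceDisks
# could return
#   (True, [("node1.example.com", "sda", "/dev/drbd0")])
# where the third element of each tuple is whatever rpc.call_blockdev_assemble
# returned for the primary node.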
2108 fe7b0351 Michael Hanselmann
def _StartInstanceDisks(cfg, instance, force):
2109 3ecf6786 Iustin Pop
  """Start the disks of an instance.
2110 3ecf6786 Iustin Pop

2111 3ecf6786 Iustin Pop
  """
2112 fe7b0351 Michael Hanselmann
  disks_ok, dummy = _AssembleInstanceDisks(instance, cfg,
2113 fe7b0351 Michael Hanselmann
                                           ignore_secondaries=force)
2114 fe7b0351 Michael Hanselmann
  if not disks_ok:
2115 fe7b0351 Michael Hanselmann
    _ShutdownInstanceDisks(instance, cfg)
2116 fe7b0351 Michael Hanselmann
    if force is not None and not force:
2117 fe7b0351 Michael Hanselmann
      logger.Error("If the message above refers to a secondary node,"
2118 fe7b0351 Michael Hanselmann
                   " you can retry the operation using '--force'.")
2119 3ecf6786 Iustin Pop
    raise errors.OpExecError("Disk consistency error")
2120 fe7b0351 Michael Hanselmann
2121 fe7b0351 Michael Hanselmann
2122 a8083063 Iustin Pop
class LUDeactivateInstanceDisks(NoHooksLU):
2123 a8083063 Iustin Pop
  """Shutdown an instance's disks.
2124 a8083063 Iustin Pop

2125 a8083063 Iustin Pop
  """
2126 a8083063 Iustin Pop
  _OP_REQP = ["instance_name"]
2127 a8083063 Iustin Pop
2128 a8083063 Iustin Pop
  def CheckPrereq(self):
2129 a8083063 Iustin Pop
    """Check prerequisites.
2130 a8083063 Iustin Pop

2131 a8083063 Iustin Pop
    This checks that the instance is in the cluster.
2132 a8083063 Iustin Pop

2133 a8083063 Iustin Pop
    """
2134 a8083063 Iustin Pop
    instance = self.cfg.GetInstanceInfo(
2135 a8083063 Iustin Pop
      self.cfg.ExpandInstanceName(self.op.instance_name))
2136 a8083063 Iustin Pop
    if instance is None:
2137 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("Instance '%s' not known" %
2138 3ecf6786 Iustin Pop
                                 self.op.instance_name)
2139 a8083063 Iustin Pop
    self.instance = instance
2140 a8083063 Iustin Pop
2141 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
2142 a8083063 Iustin Pop
    """Deactivate the disks
2143 a8083063 Iustin Pop

2144 a8083063 Iustin Pop
    """
2145 a8083063 Iustin Pop
    instance = self.instance
2146 a8083063 Iustin Pop
    ins_l = rpc.call_instance_list([instance.primary_node])
2147 a8083063 Iustin Pop
    ins_l = ins_l[instance.primary_node]
2148 a8083063 Iustin Pop
    if not type(ins_l) is list:
2149 3ecf6786 Iustin Pop
      raise errors.OpExecError("Can't contact node '%s'" %
2150 3ecf6786 Iustin Pop
                               instance.primary_node)
2151 a8083063 Iustin Pop
2152 a8083063 Iustin Pop
    if self.instance.name in ins_l:
2153 3ecf6786 Iustin Pop
      raise errors.OpExecError("Instance is running, can't shutdown"
2154 3ecf6786 Iustin Pop
                               " block devices.")
2155 a8083063 Iustin Pop
2156 a8083063 Iustin Pop
    _ShutdownInstanceDisks(instance, self.cfg)
2157 a8083063 Iustin Pop
2158 a8083063 Iustin Pop
2159 a8083063 Iustin Pop
def _ShutdownInstanceDisks(instance, cfg, ignore_primary=False):
2160 a8083063 Iustin Pop
  """Shutdown block devices of an instance.
2161 a8083063 Iustin Pop

2162 a8083063 Iustin Pop
  This does the shutdown on all nodes of the instance.
2163 a8083063 Iustin Pop

2164 a8083063 Iustin Pop
  If ignore_primary is true, errors on the primary node are
2165 a8083063 Iustin Pop
  ignored.
2166 a8083063 Iustin Pop

2167 a8083063 Iustin Pop
  """
2168 a8083063 Iustin Pop
  result = True
2169 a8083063 Iustin Pop
  for disk in instance.disks:
2170 a8083063 Iustin Pop
    for node, top_disk in disk.ComputeNodeTree(instance.primary_node):
2171 a8083063 Iustin Pop
      cfg.SetDiskID(top_disk, node)
2172 a8083063 Iustin Pop
      if not rpc.call_blockdev_shutdown(node, top_disk):
2173 a8083063 Iustin Pop
        logger.Error("could not shutdown block device %s on node %s" %
2174 a8083063 Iustin Pop
                     (disk.iv_name, node))
2175 a8083063 Iustin Pop
        if not ignore_primary or node != instance.primary_node:
2176 a8083063 Iustin Pop
          result = False
2177 a8083063 Iustin Pop
  return result
2178 a8083063 Iustin Pop
2179 a8083063 Iustin Pop
2180 d4f16fd9 Iustin Pop
def _CheckNodeFreeMemory(cfg, node, reason, requested):
2181 d4f16fd9 Iustin Pop
  """Checks if a node has enough free memory.
2182 d4f16fd9 Iustin Pop

2183 d4f16fd9 Iustin Pop
  This function checks if a given node has the needed amount of free
2184 d4f16fd9 Iustin Pop
  memory. In case the node has less memory or we cannot get the
2185 d4f16fd9 Iustin Pop
  information from the node, this function raises an OpPrereqError
2186 d4f16fd9 Iustin Pop
  exception.
2187 d4f16fd9 Iustin Pop

2188 d4f16fd9 Iustin Pop
  Args:
2189 d4f16fd9 Iustin Pop
    - cfg: a ConfigWriter instance
2190 d4f16fd9 Iustin Pop
    - node: the node name
2191 d4f16fd9 Iustin Pop
    - reason: string to use in the error message
2192 d4f16fd9 Iustin Pop
    - requested: the amount of memory in MiB
2193 d4f16fd9 Iustin Pop

2194 d4f16fd9 Iustin Pop
  """
2195 d4f16fd9 Iustin Pop
  nodeinfo = rpc.call_node_info([node], cfg.GetVGName())
2196 d4f16fd9 Iustin Pop
  if not nodeinfo or not isinstance(nodeinfo, dict):
2197 d4f16fd9 Iustin Pop
    raise errors.OpPrereqError("Could not contact node %s for resource"
2198 d4f16fd9 Iustin Pop
                             " information" % (node,))
2199 d4f16fd9 Iustin Pop
2200 d4f16fd9 Iustin Pop
  free_mem = nodeinfo[node].get('memory_free')
2201 d4f16fd9 Iustin Pop
  if not isinstance(free_mem, int):
2202 d4f16fd9 Iustin Pop
    raise errors.OpPrereqError("Can't compute free memory on node %s, result"
2203 d4f16fd9 Iustin Pop
                             " was '%s'" % (node, free_mem))
2204 d4f16fd9 Iustin Pop
  if requested > free_mem:
2205 d4f16fd9 Iustin Pop
    raise errors.OpPrereqError("Not enough memory on node %s for %s:"
2206 d4f16fd9 Iustin Pop
                             " needed %s MiB, available %s MiB" %
2207 d4f16fd9 Iustin Pop
                             (node, reason, requested, free_mem))
2208 d4f16fd9 Iustin Pop
2209 d4f16fd9 Iustin Pop
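# Illustrative usage note (not part of the original module): a caller such as
# LUStartupInstance.CheckPrereq invokes the helper above roughly as
#   _CheckNodeFreeMemory(self.cfg, instance.primary_node,
#                        "starting instance %s" % instance.name,
#                        instance.memory)
# and relies on the raised OpPrereqError to abort the operation early.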
2210 a8083063 Iustin Pop
class LUStartupInstance(LogicalUnit):
2211 a8083063 Iustin Pop
  """Starts an instance.
2212 a8083063 Iustin Pop

2213 a8083063 Iustin Pop
  """
2214 a8083063 Iustin Pop
  HPATH = "instance-start"
2215 a8083063 Iustin Pop
  HTYPE = constants.HTYPE_INSTANCE
2216 a8083063 Iustin Pop
  _OP_REQP = ["instance_name", "force"]
2217 a8083063 Iustin Pop
2218 a8083063 Iustin Pop
  def BuildHooksEnv(self):
2219 a8083063 Iustin Pop
    """Build hooks env.
2220 a8083063 Iustin Pop

2221 a8083063 Iustin Pop
    This runs on master, primary and secondary nodes of the instance.
2222 a8083063 Iustin Pop

2223 a8083063 Iustin Pop
    """
2224 a8083063 Iustin Pop
    env = {
2225 a8083063 Iustin Pop
      "FORCE": self.op.force,
2226 a8083063 Iustin Pop
      }
2227 396e1b78 Michael Hanselmann
    env.update(_BuildInstanceHookEnvByObject(self.instance))
2228 880478f8 Iustin Pop
    nl = ([self.sstore.GetMasterNode(), self.instance.primary_node] +
2229 a8083063 Iustin Pop
          list(self.instance.secondary_nodes))
2230 a8083063 Iustin Pop
    return env, nl, nl
2231 a8083063 Iustin Pop
2232 a8083063 Iustin Pop
  def CheckPrereq(self):
2233 a8083063 Iustin Pop
    """Check prerequisites.
2234 a8083063 Iustin Pop

2235 a8083063 Iustin Pop
    This checks that the instance is in the cluster.
2236 a8083063 Iustin Pop

2237 a8083063 Iustin Pop
    """
2238 a8083063 Iustin Pop
    instance = self.cfg.GetInstanceInfo(
2239 a8083063 Iustin Pop
      self.cfg.ExpandInstanceName(self.op.instance_name))
2240 a8083063 Iustin Pop
    if instance is None:
2241 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("Instance '%s' not known" %
2242 3ecf6786 Iustin Pop
                                 self.op.instance_name)
2243 a8083063 Iustin Pop
2244 a8083063 Iustin Pop
    # check bridges existence
2245 bf6929a2 Alexander Schreiber
    _CheckInstanceBridgesExist(instance)
2246 a8083063 Iustin Pop
2247 d4f16fd9 Iustin Pop
    _CheckNodeFreeMemory(self.cfg, instance.primary_node,
2248 d4f16fd9 Iustin Pop
                         "starting instance %s" % instance.name,
2249 d4f16fd9 Iustin Pop
                         instance.memory)
2250 d4f16fd9 Iustin Pop
2251 a8083063 Iustin Pop
    self.instance = instance
2252 a8083063 Iustin Pop
    self.op.instance_name = instance.name
2253 a8083063 Iustin Pop
2254 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
2255 a8083063 Iustin Pop
    """Start the instance.
2256 a8083063 Iustin Pop

2257 a8083063 Iustin Pop
    """
2258 a8083063 Iustin Pop
    instance = self.instance
2259 a8083063 Iustin Pop
    force = self.op.force
2260 a8083063 Iustin Pop
    extra_args = getattr(self.op, "extra_args", "")
2261 a8083063 Iustin Pop
2262 fe482621 Iustin Pop
    self.cfg.MarkInstanceUp(instance.name)
2263 fe482621 Iustin Pop
2264 a8083063 Iustin Pop
    node_current = instance.primary_node
2265 a8083063 Iustin Pop
2266 fe7b0351 Michael Hanselmann
    _StartInstanceDisks(self.cfg, instance, force)
2267 a8083063 Iustin Pop
2268 a8083063 Iustin Pop
    if not rpc.call_instance_start(node_current, instance, extra_args):
2269 a8083063 Iustin Pop
      _ShutdownInstanceDisks(instance, self.cfg)
2270 3ecf6786 Iustin Pop
      raise errors.OpExecError("Could not start instance")
2271 a8083063 Iustin Pop
2272 a8083063 Iustin Pop
2273 bf6929a2 Alexander Schreiber
class LURebootInstance(LogicalUnit):
2274 bf6929a2 Alexander Schreiber
  """Reboot an instance.
2275 bf6929a2 Alexander Schreiber

2276 bf6929a2 Alexander Schreiber
  """
2277 bf6929a2 Alexander Schreiber
  HPATH = "instance-reboot"
2278 bf6929a2 Alexander Schreiber
  HTYPE = constants.HTYPE_INSTANCE
2279 bf6929a2 Alexander Schreiber
  _OP_REQP = ["instance_name", "ignore_secondaries", "reboot_type"]
2280 bf6929a2 Alexander Schreiber
2281 bf6929a2 Alexander Schreiber
  def BuildHooksEnv(self):
2282 bf6929a2 Alexander Schreiber
    """Build hooks env.
2283 bf6929a2 Alexander Schreiber

2284 bf6929a2 Alexander Schreiber
    This runs on master, primary and secondary nodes of the instance.
2285 bf6929a2 Alexander Schreiber

2286 bf6929a2 Alexander Schreiber
    """
2287 bf6929a2 Alexander Schreiber
    env = {
2288 bf6929a2 Alexander Schreiber
      "IGNORE_SECONDARIES": self.op.ignore_secondaries,
2289 bf6929a2 Alexander Schreiber
      }
2290 bf6929a2 Alexander Schreiber
    env.update(_BuildInstanceHookEnvByObject(self.instance))
2291 bf6929a2 Alexander Schreiber
    nl = ([self.sstore.GetMasterNode(), self.instance.primary_node] +
2292 bf6929a2 Alexander Schreiber
          list(self.instance.secondary_nodes))
2293 bf6929a2 Alexander Schreiber
    return env, nl, nl
2294 bf6929a2 Alexander Schreiber
2295 bf6929a2 Alexander Schreiber
  def CheckPrereq(self):
2296 bf6929a2 Alexander Schreiber
    """Check prerequisites.
2297 bf6929a2 Alexander Schreiber

2298 bf6929a2 Alexander Schreiber
    This checks that the instance is in the cluster.
2299 bf6929a2 Alexander Schreiber

2300 bf6929a2 Alexander Schreiber
    """
2301 bf6929a2 Alexander Schreiber
    instance = self.cfg.GetInstanceInfo(
2302 bf6929a2 Alexander Schreiber
      self.cfg.ExpandInstanceName(self.op.instance_name))
2303 bf6929a2 Alexander Schreiber
    if instance is None:
2304 bf6929a2 Alexander Schreiber
      raise errors.OpPrereqError("Instance '%s' not known" %
2305 bf6929a2 Alexander Schreiber
                                 self.op.instance_name)
2306 bf6929a2 Alexander Schreiber
2307 bf6929a2 Alexander Schreiber
    # check bridges existence
2308 bf6929a2 Alexander Schreiber
    _CheckInstanceBridgesExist(instance)
2309 bf6929a2 Alexander Schreiber
2310 bf6929a2 Alexander Schreiber
    self.instance = instance
2311 bf6929a2 Alexander Schreiber
    self.op.instance_name = instance.name
2312 bf6929a2 Alexander Schreiber
2313 bf6929a2 Alexander Schreiber
  def Exec(self, feedback_fn):
2314 bf6929a2 Alexander Schreiber
    """Reboot the instance.
2315 bf6929a2 Alexander Schreiber

2316 bf6929a2 Alexander Schreiber
    """
2317 bf6929a2 Alexander Schreiber
    instance = self.instance
2318 bf6929a2 Alexander Schreiber
    ignore_secondaries = self.op.ignore_secondaries
2319 bf6929a2 Alexander Schreiber
    reboot_type = self.op.reboot_type
2320 bf6929a2 Alexander Schreiber
    extra_args = getattr(self.op, "extra_args", "")
2321 bf6929a2 Alexander Schreiber
2322 bf6929a2 Alexander Schreiber
    node_current = instance.primary_node
2323 bf6929a2 Alexander Schreiber
2324 bf6929a2 Alexander Schreiber
    if reboot_type not in [constants.INSTANCE_REBOOT_SOFT,
2325 bf6929a2 Alexander Schreiber
                           constants.INSTANCE_REBOOT_HARD,
2326 bf6929a2 Alexander Schreiber
                           constants.INSTANCE_REBOOT_FULL]:
2327 bf6929a2 Alexander Schreiber
      raise errors.ParameterError("reboot type not in [%s, %s, %s]" %
2328 bf6929a2 Alexander Schreiber
                                  (constants.INSTANCE_REBOOT_SOFT,
2329 bf6929a2 Alexander Schreiber
                                   constants.INSTANCE_REBOOT_HARD,
2330 bf6929a2 Alexander Schreiber
                                   constants.INSTANCE_REBOOT_FULL))
2331 bf6929a2 Alexander Schreiber
2332 bf6929a2 Alexander Schreiber
    if reboot_type in [constants.INSTANCE_REBOOT_SOFT,
2333 bf6929a2 Alexander Schreiber
                       constants.INSTANCE_REBOOT_HARD]:
2334 bf6929a2 Alexander Schreiber
      if not rpc.call_instance_reboot(node_current, instance,
2335 bf6929a2 Alexander Schreiber
                                      reboot_type, extra_args):
2336 bf6929a2 Alexander Schreiber
        raise errors.OpExecError("Could not reboot instance")
2337 bf6929a2 Alexander Schreiber
    else:
2338 bf6929a2 Alexander Schreiber
      if not rpc.call_instance_shutdown(node_current, instance):
2339 bf6929a2 Alexander Schreiber
        raise errors.OpExecError("could not shutdown instance for full reboot")
2340 bf6929a2 Alexander Schreiber
      _ShutdownInstanceDisks(instance, self.cfg)
2341 bf6929a2 Alexander Schreiber
      _StartInstanceDisks(self.cfg, instance, ignore_secondaries)
2342 bf6929a2 Alexander Schreiber
      if not rpc.call_instance_start(node_current, instance, extra_args):
2343 bf6929a2 Alexander Schreiber
        _ShutdownInstanceDisks(instance, self.cfg)
2344 bf6929a2 Alexander Schreiber
        raise errors.OpExecError("Could not start instance for full reboot")
2345 bf6929a2 Alexander Schreiber
2346 bf6929a2 Alexander Schreiber
    self.cfg.MarkInstanceUp(instance.name)
2347 bf6929a2 Alexander Schreiber
2348 bf6929a2 Alexander Schreiber
2349 a8083063 Iustin Pop
class LUShutdownInstance(LogicalUnit):
2350 a8083063 Iustin Pop
  """Shutdown an instance.
2351 a8083063 Iustin Pop

2352 a8083063 Iustin Pop
  """
2353 a8083063 Iustin Pop
  HPATH = "instance-stop"
2354 a8083063 Iustin Pop
  HTYPE = constants.HTYPE_INSTANCE
2355 a8083063 Iustin Pop
  _OP_REQP = ["instance_name"]
2356 a8083063 Iustin Pop
2357 a8083063 Iustin Pop
  def BuildHooksEnv(self):
2358 a8083063 Iustin Pop
    """Build hooks env.
2359 a8083063 Iustin Pop

2360 a8083063 Iustin Pop
    This runs on master, primary and secondary nodes of the instance.
2361 a8083063 Iustin Pop

2362 a8083063 Iustin Pop
    """
2363 396e1b78 Michael Hanselmann
    env = _BuildInstanceHookEnvByObject(self.instance)
2364 880478f8 Iustin Pop
    nl = ([self.sstore.GetMasterNode(), self.instance.primary_node] +
2365 a8083063 Iustin Pop
          list(self.instance.secondary_nodes))
2366 a8083063 Iustin Pop
    return env, nl, nl
2367 a8083063 Iustin Pop
2368 a8083063 Iustin Pop
  def CheckPrereq(self):
2369 a8083063 Iustin Pop
    """Check prerequisites.
2370 a8083063 Iustin Pop

2371 a8083063 Iustin Pop
    This checks that the instance is in the cluster.
2372 a8083063 Iustin Pop

2373 a8083063 Iustin Pop
    """
2374 a8083063 Iustin Pop
    instance = self.cfg.GetInstanceInfo(
2375 a8083063 Iustin Pop
      self.cfg.ExpandInstanceName(self.op.instance_name))
2376 a8083063 Iustin Pop
    if instance is None:
2377 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("Instance '%s' not known" %
2378 3ecf6786 Iustin Pop
                                 self.op.instance_name)
2379 a8083063 Iustin Pop
    self.instance = instance
2380 a8083063 Iustin Pop
2381 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
2382 a8083063 Iustin Pop
    """Shutdown the instance.
2383 a8083063 Iustin Pop

2384 a8083063 Iustin Pop
    """
2385 a8083063 Iustin Pop
    instance = self.instance
2386 a8083063 Iustin Pop
    node_current = instance.primary_node
2387 fe482621 Iustin Pop
    self.cfg.MarkInstanceDown(instance.name)
2388 a8083063 Iustin Pop
    if not rpc.call_instance_shutdown(node_current, instance):
2389 a8083063 Iustin Pop
      logger.Error("could not shutdown instance")
2390 a8083063 Iustin Pop
2391 a8083063 Iustin Pop
    _ShutdownInstanceDisks(instance, self.cfg)
2392 a8083063 Iustin Pop
2393 a8083063 Iustin Pop
2394 fe7b0351 Michael Hanselmann
class LUReinstallInstance(LogicalUnit):
2395 fe7b0351 Michael Hanselmann
  """Reinstall an instance.
2396 fe7b0351 Michael Hanselmann

2397 fe7b0351 Michael Hanselmann
  """
2398 fe7b0351 Michael Hanselmann
  HPATH = "instance-reinstall"
2399 fe7b0351 Michael Hanselmann
  HTYPE = constants.HTYPE_INSTANCE
2400 fe7b0351 Michael Hanselmann
  _OP_REQP = ["instance_name"]
2401 fe7b0351 Michael Hanselmann
2402 fe7b0351 Michael Hanselmann
  def BuildHooksEnv(self):
2403 fe7b0351 Michael Hanselmann
    """Build hooks env.
2404 fe7b0351 Michael Hanselmann

2405 fe7b0351 Michael Hanselmann
    This runs on master, primary and secondary nodes of the instance.
2406 fe7b0351 Michael Hanselmann

2407 fe7b0351 Michael Hanselmann
    """
2408 396e1b78 Michael Hanselmann
    env = _BuildInstanceHookEnvByObject(self.instance)
2409 fe7b0351 Michael Hanselmann
    nl = ([self.sstore.GetMasterNode(), self.instance.primary_node] +
2410 fe7b0351 Michael Hanselmann
          list(self.instance.secondary_nodes))
2411 fe7b0351 Michael Hanselmann
    return env, nl, nl
2412 fe7b0351 Michael Hanselmann
2413 fe7b0351 Michael Hanselmann
  def CheckPrereq(self):
2414 fe7b0351 Michael Hanselmann
    """Check prerequisites.
2415 fe7b0351 Michael Hanselmann

2416 fe7b0351 Michael Hanselmann
    This checks that the instance is in the cluster and is not running.
2417 fe7b0351 Michael Hanselmann

2418 fe7b0351 Michael Hanselmann
    """
2419 fe7b0351 Michael Hanselmann
    instance = self.cfg.GetInstanceInfo(
2420 fe7b0351 Michael Hanselmann
      self.cfg.ExpandInstanceName(self.op.instance_name))
2421 fe7b0351 Michael Hanselmann
    if instance is None:
2422 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("Instance '%s' not known" %
2423 3ecf6786 Iustin Pop
                                 self.op.instance_name)
2424 fe7b0351 Michael Hanselmann
    if instance.disk_template == constants.DT_DISKLESS:
2425 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("Instance '%s' has no disks" %
2426 3ecf6786 Iustin Pop
                                 self.op.instance_name)
2427 fe7b0351 Michael Hanselmann
    if instance.status != "down":
2428 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("Instance '%s' is marked to be up" %
2429 3ecf6786 Iustin Pop
                                 self.op.instance_name)
2430 fe7b0351 Michael Hanselmann
    remote_info = rpc.call_instance_info(instance.primary_node, instance.name)
2431 fe7b0351 Michael Hanselmann
    if remote_info:
2432 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("Instance '%s' is running on the node %s" %
2433 3ecf6786 Iustin Pop
                                 (self.op.instance_name,
2434 3ecf6786 Iustin Pop
                                  instance.primary_node))
2435 d0834de3 Michael Hanselmann
2436 d0834de3 Michael Hanselmann
    self.op.os_type = getattr(self.op, "os_type", None)
2437 d0834de3 Michael Hanselmann
    if self.op.os_type is not None:
2438 d0834de3 Michael Hanselmann
      # OS verification
2439 d0834de3 Michael Hanselmann
      pnode = self.cfg.GetNodeInfo(
2440 d0834de3 Michael Hanselmann
        self.cfg.ExpandNodeName(instance.primary_node))
2441 d0834de3 Michael Hanselmann
      if pnode is None:
2442 3ecf6786 Iustin Pop
        raise errors.OpPrereqError("Primary node '%s' is unknown" %
2443 3ecf6786 Iustin Pop
                                   self.op.pnode)
2444 00fe9e38 Guido Trotter
      os_obj = rpc.call_os_get(pnode.name, self.op.os_type)
2445 dfa96ded Guido Trotter
      if not os_obj:
2446 3ecf6786 Iustin Pop
        raise errors.OpPrereqError("OS '%s' not in supported OS list for"
2447 3ecf6786 Iustin Pop
                                   " primary node"  % self.op.os_type)
2448 d0834de3 Michael Hanselmann
2449 fe7b0351 Michael Hanselmann
    self.instance = instance
2450 fe7b0351 Michael Hanselmann
2451 fe7b0351 Michael Hanselmann
  def Exec(self, feedback_fn):
2452 fe7b0351 Michael Hanselmann
    """Reinstall the instance.
2453 fe7b0351 Michael Hanselmann

2454 fe7b0351 Michael Hanselmann
    """
2455 fe7b0351 Michael Hanselmann
    inst = self.instance
2456 fe7b0351 Michael Hanselmann
2457 d0834de3 Michael Hanselmann
    if self.op.os_type is not None:
2458 d0834de3 Michael Hanselmann
      feedback_fn("Changing OS to '%s'..." % self.op.os_type)
2459 d0834de3 Michael Hanselmann
      inst.os = self.op.os_type
2460 d0834de3 Michael Hanselmann
      self.cfg.AddInstance(inst)
2461 d0834de3 Michael Hanselmann
2462 fe7b0351 Michael Hanselmann
    _StartInstanceDisks(self.cfg, inst, None)
2463 fe7b0351 Michael Hanselmann
    try:
2464 fe7b0351 Michael Hanselmann
      feedback_fn("Running the instance OS create scripts...")
2465 fe7b0351 Michael Hanselmann
      if not rpc.call_instance_os_add(inst.primary_node, inst, "sda", "sdb"):
2466 f4bc1f2c Michael Hanselmann
        raise errors.OpExecError("Could not install OS for instance %s"
2467 f4bc1f2c Michael Hanselmann
                                 " on node %s" %
2468 3ecf6786 Iustin Pop
                                 (inst.name, inst.primary_node))
2469 fe7b0351 Michael Hanselmann
    finally:
2470 fe7b0351 Michael Hanselmann
      _ShutdownInstanceDisks(inst, self.cfg)
2471 fe7b0351 Michael Hanselmann
2472 fe7b0351 Michael Hanselmann
2473 decd5f45 Iustin Pop
class LURenameInstance(LogicalUnit):
2474 decd5f45 Iustin Pop
  """Rename an instance.
2475 decd5f45 Iustin Pop

2476 decd5f45 Iustin Pop
  """
2477 decd5f45 Iustin Pop
  HPATH = "instance-rename"
2478 decd5f45 Iustin Pop
  HTYPE = constants.HTYPE_INSTANCE
2479 decd5f45 Iustin Pop
  _OP_REQP = ["instance_name", "new_name"]
2480 decd5f45 Iustin Pop
2481 decd5f45 Iustin Pop
  def BuildHooksEnv(self):
2482 decd5f45 Iustin Pop
    """Build hooks env.
2483 decd5f45 Iustin Pop

2484 decd5f45 Iustin Pop
    This runs on master, primary and secondary nodes of the instance.
2485 decd5f45 Iustin Pop

2486 decd5f45 Iustin Pop
    """
2487 decd5f45 Iustin Pop
    env = _BuildInstanceHookEnvByObject(self.instance)
2488 decd5f45 Iustin Pop
    env["INSTANCE_NEW_NAME"] = self.op.new_name
2489 decd5f45 Iustin Pop
    nl = ([self.sstore.GetMasterNode(), self.instance.primary_node] +
2490 decd5f45 Iustin Pop
          list(self.instance.secondary_nodes))
2491 decd5f45 Iustin Pop
    return env, nl, nl
2492 decd5f45 Iustin Pop
2493 decd5f45 Iustin Pop
  def CheckPrereq(self):
2494 decd5f45 Iustin Pop
    """Check prerequisites.
2495 decd5f45 Iustin Pop

2496 decd5f45 Iustin Pop
    This checks that the instance is in the cluster and is not running.
2497 decd5f45 Iustin Pop

2498 decd5f45 Iustin Pop
    """
2499 decd5f45 Iustin Pop
    instance = self.cfg.GetInstanceInfo(
2500 decd5f45 Iustin Pop
      self.cfg.ExpandInstanceName(self.op.instance_name))
2501 decd5f45 Iustin Pop
    if instance is None:
2502 decd5f45 Iustin Pop
      raise errors.OpPrereqError("Instance '%s' not known" %
2503 decd5f45 Iustin Pop
                                 self.op.instance_name)
2504 decd5f45 Iustin Pop
    if instance.status != "down":
2505 decd5f45 Iustin Pop
      raise errors.OpPrereqError("Instance '%s' is marked to be up" %
2506 decd5f45 Iustin Pop
                                 self.op.instance_name)
2507 decd5f45 Iustin Pop
    remote_info = rpc.call_instance_info(instance.primary_node, instance.name)
2508 decd5f45 Iustin Pop
    if remote_info:
2509 decd5f45 Iustin Pop
      raise errors.OpPrereqError("Instance '%s' is running on the node %s" %
2510 decd5f45 Iustin Pop
                                 (self.op.instance_name,
2511 decd5f45 Iustin Pop
                                  instance.primary_node))
2512 decd5f45 Iustin Pop
    self.instance = instance
2513 decd5f45 Iustin Pop
2514 decd5f45 Iustin Pop
    # new name verification
2515 89e1fc26 Iustin Pop
    name_info = utils.HostInfo(self.op.new_name)
2516 decd5f45 Iustin Pop
2517 89e1fc26 Iustin Pop
    self.op.new_name = new_name = name_info.name
2518 7bde3275 Guido Trotter
    instance_list = self.cfg.GetInstanceList()
2519 7bde3275 Guido Trotter
    if new_name in instance_list:
2520 7bde3275 Guido Trotter
      raise errors.OpPrereqError("Instance '%s' is already in the cluster" %
2521 c09f363f Manuel Franceschini
                                 new_name)
2522 7bde3275 Guido Trotter
2523 decd5f45 Iustin Pop
    if not getattr(self.op, "ignore_ip", False):
2524 89e1fc26 Iustin Pop
      command = ["fping", "-q", name_info.ip]
2525 decd5f45 Iustin Pop
      result = utils.RunCmd(command)
2526 decd5f45 Iustin Pop
      if not result.failed:
2527 decd5f45 Iustin Pop
        raise errors.OpPrereqError("IP %s of instance %s already in use" %
2528 89e1fc26 Iustin Pop
                                   (name_info.ip, new_name))
2529 decd5f45 Iustin Pop
2530 decd5f45 Iustin Pop
2531 decd5f45 Iustin Pop
  def Exec(self, feedback_fn):
2532 decd5f45 Iustin Pop
    """Reinstall the instance.
2533 decd5f45 Iustin Pop

2534 decd5f45 Iustin Pop
    """
2535 decd5f45 Iustin Pop
    inst = self.instance
2536 decd5f45 Iustin Pop
    old_name = inst.name
2537 decd5f45 Iustin Pop
2538 b23c4333 Manuel Franceschini
    if inst.disk_template == constants.DT_FILE:
2539 b23c4333 Manuel Franceschini
      old_file_storage_dir = os.path.dirname(inst.disks[0].logical_id[1])
2540 b23c4333 Manuel Franceschini
2541 decd5f45 Iustin Pop
    self.cfg.RenameInstance(inst.name, self.op.new_name)
2542 decd5f45 Iustin Pop
2543 decd5f45 Iustin Pop
    # re-read the instance from the configuration after rename
2544 decd5f45 Iustin Pop
    inst = self.cfg.GetInstanceInfo(self.op.new_name)
2545 decd5f45 Iustin Pop
2546 b23c4333 Manuel Franceschini
    if inst.disk_template == constants.DT_FILE:
2547 b23c4333 Manuel Franceschini
      new_file_storage_dir = os.path.dirname(inst.disks[0].logical_id[1])
2548 b23c4333 Manuel Franceschini
      result = rpc.call_file_storage_dir_rename(inst.primary_node,
2549 b23c4333 Manuel Franceschini
                                                old_file_storage_dir,
2550 b23c4333 Manuel Franceschini
                                                new_file_storage_dir)
2551 b23c4333 Manuel Franceschini
2552 b23c4333 Manuel Franceschini
      if not result:
2553 b23c4333 Manuel Franceschini
        raise errors.OpExecError("Could not connect to node '%s' to rename"
2554 b23c4333 Manuel Franceschini
                                 " directory '%s' to '%s' (but the instance"
2555 b23c4333 Manuel Franceschini
                                 " has been renamed in Ganeti)" % (
2556 b23c4333 Manuel Franceschini
                                 inst.primary_node, old_file_storage_dir,
2557 b23c4333 Manuel Franceschini
                                 new_file_storage_dir))
2558 b23c4333 Manuel Franceschini
2559 b23c4333 Manuel Franceschini
      if not result[0]:
2560 b23c4333 Manuel Franceschini
        raise errors.OpExecError("Could not rename directory '%s' to '%s'"
2561 b23c4333 Manuel Franceschini
                                 " (but the instance has been renamed in"
2562 b23c4333 Manuel Franceschini
                                 " Ganeti)" % (old_file_storage_dir,
2563 b23c4333 Manuel Franceschini
                                               new_file_storage_dir))
2564 b23c4333 Manuel Franceschini
2565 decd5f45 Iustin Pop
    _StartInstanceDisks(self.cfg, inst, None)
2566 decd5f45 Iustin Pop
    try:
2567 decd5f45 Iustin Pop
      if not rpc.call_instance_run_rename(inst.primary_node, inst, old_name,
2568 decd5f45 Iustin Pop
                                          "sda", "sdb"):
2569 f4bc1f2c Michael Hanselmann
        msg = ("Could run OS rename script for instance %s on node %s (but the"
2570 f4bc1f2c Michael Hanselmann
               " instance has been renamed in Ganeti)" %
2571 decd5f45 Iustin Pop
               (inst.name, inst.primary_node))
2572 decd5f45 Iustin Pop
        logger.Error(msg)
2573 decd5f45 Iustin Pop
    finally:
2574 decd5f45 Iustin Pop
      _ShutdownInstanceDisks(inst, self.cfg)
2575 decd5f45 Iustin Pop
2576 decd5f45 Iustin Pop
2577 a8083063 Iustin Pop
class LURemoveInstance(LogicalUnit):
2578 a8083063 Iustin Pop
  """Remove an instance.
2579 a8083063 Iustin Pop

2580 a8083063 Iustin Pop
  """
2581 a8083063 Iustin Pop
  HPATH = "instance-remove"
2582 a8083063 Iustin Pop
  HTYPE = constants.HTYPE_INSTANCE
2583 a8083063 Iustin Pop
  _OP_REQP = ["instance_name"]
2584 a8083063 Iustin Pop
2585 a8083063 Iustin Pop
  def BuildHooksEnv(self):
2586 a8083063 Iustin Pop
    """Build hooks env.
2587 a8083063 Iustin Pop

2588 a8083063 Iustin Pop
    This runs on master, primary and secondary nodes of the instance.
2589 a8083063 Iustin Pop

2590 a8083063 Iustin Pop
    """
2591 396e1b78 Michael Hanselmann
    env = _BuildInstanceHookEnvByObject(self.instance)
2592 1d67656e Iustin Pop
    nl = [self.sstore.GetMasterNode()]
2593 a8083063 Iustin Pop
    return env, nl, nl
2594 a8083063 Iustin Pop
2595 a8083063 Iustin Pop
  def CheckPrereq(self):
2596 a8083063 Iustin Pop
    """Check prerequisites.
2597 a8083063 Iustin Pop

2598 a8083063 Iustin Pop
    This checks that the instance is in the cluster.
2599 a8083063 Iustin Pop

2600 a8083063 Iustin Pop
    """
2601 a8083063 Iustin Pop
    instance = self.cfg.GetInstanceInfo(
2602 a8083063 Iustin Pop
      self.cfg.ExpandInstanceName(self.op.instance_name))
2603 a8083063 Iustin Pop
    if instance is None:
2604 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("Instance '%s' not known" %
2605 3ecf6786 Iustin Pop
                                 self.op.instance_name)
2606 a8083063 Iustin Pop
    self.instance = instance
2607 a8083063 Iustin Pop
2608 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
2609 a8083063 Iustin Pop
    """Remove the instance.
2610 a8083063 Iustin Pop

2611 a8083063 Iustin Pop
    """
2612 a8083063 Iustin Pop
    instance = self.instance
2613 a8083063 Iustin Pop
    logger.Info("shutting down instance %s on node %s" %
2614 a8083063 Iustin Pop
                (instance.name, instance.primary_node))
2615 a8083063 Iustin Pop
2616 a8083063 Iustin Pop
    if not rpc.call_instance_shutdown(instance.primary_node, instance):
2617 1d67656e Iustin Pop
      if self.op.ignore_failures:
2618 1d67656e Iustin Pop
        feedback_fn("Warning: can't shutdown instance")
2619 1d67656e Iustin Pop
      else:
2620 1d67656e Iustin Pop
        raise errors.OpExecError("Could not shutdown instance %s on node %s" %
2621 1d67656e Iustin Pop
                                 (instance.name, instance.primary_node))
2622 a8083063 Iustin Pop
2623 a8083063 Iustin Pop
    logger.Info("removing block devices for instance %s" % instance.name)
2624 a8083063 Iustin Pop
2625 1d67656e Iustin Pop
    if not _RemoveDisks(instance, self.cfg):
2626 1d67656e Iustin Pop
      if self.op.ignore_failures:
2627 1d67656e Iustin Pop
        feedback_fn("Warning: can't remove instance's disks")
2628 1d67656e Iustin Pop
      else:
2629 1d67656e Iustin Pop
        raise errors.OpExecError("Can't remove instance's disks")
2630 a8083063 Iustin Pop
2631 a8083063 Iustin Pop
    logger.Info("removing instance %s out of cluster config" % instance.name)
2632 a8083063 Iustin Pop
2633 a8083063 Iustin Pop
    self.cfg.RemoveInstance(instance.name)
2634 a8083063 Iustin Pop
2635 a8083063 Iustin Pop
2636 a8083063 Iustin Pop
class LUQueryInstances(NoHooksLU):
2637 a8083063 Iustin Pop
  """Logical unit for querying instances.
2638 a8083063 Iustin Pop

2639 a8083063 Iustin Pop
  """
2640 069dcc86 Iustin Pop
  _OP_REQP = ["output_fields", "names"]
2641 a8083063 Iustin Pop
2642 a8083063 Iustin Pop
  def CheckPrereq(self):
2643 a8083063 Iustin Pop
    """Check prerequisites.
2644 a8083063 Iustin Pop

2645 a8083063 Iustin Pop
    This checks that the fields required are valid output fields.
2646 a8083063 Iustin Pop

2647 a8083063 Iustin Pop
    """
2648 d8052456 Iustin Pop
    self.dynamic_fields = frozenset(["oper_state", "oper_ram", "status"])
2649 dcb93971 Michael Hanselmann
    _CheckOutputFields(static=["name", "os", "pnode", "snodes",
2650 dcb93971 Michael Hanselmann
                               "admin_state", "admin_ram",
2651 644eeef9 Iustin Pop
                               "disk_template", "ip", "mac", "bridge",
2652 d6d415e8 Iustin Pop
                               "sda_size", "sdb_size", "vcpus"],
2653 dcb93971 Michael Hanselmann
                       dynamic=self.dynamic_fields,
2654 dcb93971 Michael Hanselmann
                       selected=self.op.output_fields)
2655 a8083063 Iustin Pop
2656 069dcc86 Iustin Pop
    self.wanted = _GetWantedInstances(self, self.op.names)
2657 069dcc86 Iustin Pop
2658 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
2659 a8083063 Iustin Pop
    """Computes the list of nodes and their attributes.
2660 a8083063 Iustin Pop

2661 a8083063 Iustin Pop
    """
2662 069dcc86 Iustin Pop
    instance_names = self.wanted
2663 a8083063 Iustin Pop
    instance_list = [self.cfg.GetInstanceInfo(iname) for iname
2664 a8083063 Iustin Pop
                     in instance_names]
2665 a8083063 Iustin Pop
2666 a8083063 Iustin Pop
    # begin data gathering
2667 a8083063 Iustin Pop
2668 a8083063 Iustin Pop
    nodes = frozenset([inst.primary_node for inst in instance_list])
2669 a8083063 Iustin Pop
2670 a8083063 Iustin Pop
    bad_nodes = []
2671 a8083063 Iustin Pop
    if self.dynamic_fields.intersection(self.op.output_fields):
2672 a8083063 Iustin Pop
      live_data = {}
2673 a8083063 Iustin Pop
      node_data = rpc.call_all_instances_info(nodes)
2674 a8083063 Iustin Pop
      for name in nodes:
2675 a8083063 Iustin Pop
        result = node_data[name]
2676 a8083063 Iustin Pop
        if result:
2677 a8083063 Iustin Pop
          live_data.update(result)
2678 a8083063 Iustin Pop
        elif result == False:
2679 a8083063 Iustin Pop
          bad_nodes.append(name)
2680 a8083063 Iustin Pop
        # else no instance is alive
2681 a8083063 Iustin Pop
    else:
2682 a8083063 Iustin Pop
      live_data = dict([(name, {}) for name in instance_names])
2683 a8083063 Iustin Pop
2684 a8083063 Iustin Pop
    # end data gathering
2685 a8083063 Iustin Pop
2686 a8083063 Iustin Pop
    output = []
2687 a8083063 Iustin Pop
    for instance in instance_list:
2688 a8083063 Iustin Pop
      iout = []
2689 a8083063 Iustin Pop
      for field in self.op.output_fields:
2690 a8083063 Iustin Pop
        if field == "name":
2691 a8083063 Iustin Pop
          val = instance.name
2692 a8083063 Iustin Pop
        elif field == "os":
2693 a8083063 Iustin Pop
          val = instance.os
2694 a8083063 Iustin Pop
        elif field == "pnode":
2695 a8083063 Iustin Pop
          val = instance.primary_node
2696 a8083063 Iustin Pop
        elif field == "snodes":
2697 8a23d2d3 Iustin Pop
          val = list(instance.secondary_nodes)
2698 a8083063 Iustin Pop
        elif field == "admin_state":
2699 8a23d2d3 Iustin Pop
          val = (instance.status != "down")
2700 a8083063 Iustin Pop
        elif field == "oper_state":
2701 a8083063 Iustin Pop
          if instance.primary_node in bad_nodes:
2702 8a23d2d3 Iustin Pop
            val = None
2703 a8083063 Iustin Pop
          else:
2704 8a23d2d3 Iustin Pop
            val = bool(live_data.get(instance.name))
2705 d8052456 Iustin Pop
        elif field == "status":
2706 d8052456 Iustin Pop
          if instance.primary_node in bad_nodes:
2707 d8052456 Iustin Pop
            val = "ERROR_nodedown"
2708 d8052456 Iustin Pop
          else:
2709 d8052456 Iustin Pop
            running = bool(live_data.get(instance.name))
2710 d8052456 Iustin Pop
            if running:
2711 d8052456 Iustin Pop
              if instance.status != "down":
2712 d8052456 Iustin Pop
                val = "running"
2713 d8052456 Iustin Pop
              else:
2714 d8052456 Iustin Pop
                val = "ERROR_up"
2715 d8052456 Iustin Pop
            else:
2716 d8052456 Iustin Pop
              if instance.status != "down":
2717 d8052456 Iustin Pop
                val = "ERROR_down"
2718 d8052456 Iustin Pop
              else:
2719 d8052456 Iustin Pop
                val = "ADMIN_down"
2720 a8083063 Iustin Pop
        elif field == "admin_ram":
2721 a8083063 Iustin Pop
          val = instance.memory
2722 a8083063 Iustin Pop
        elif field == "oper_ram":
2723 a8083063 Iustin Pop
          if instance.primary_node in bad_nodes:
2724 8a23d2d3 Iustin Pop
            val = None
2725 a8083063 Iustin Pop
          elif instance.name in live_data:
2726 a8083063 Iustin Pop
            val = live_data[instance.name].get("memory", "?")
2727 a8083063 Iustin Pop
          else:
2728 a8083063 Iustin Pop
            val = "-"
2729 a8083063 Iustin Pop
        elif field == "disk_template":
2730 a8083063 Iustin Pop
          val = instance.disk_template
2731 a8083063 Iustin Pop
        elif field == "ip":
2732 a8083063 Iustin Pop
          val = instance.nics[0].ip
2733 a8083063 Iustin Pop
        elif field == "bridge":
2734 a8083063 Iustin Pop
          val = instance.nics[0].bridge
2735 a8083063 Iustin Pop
        elif field == "mac":
2736 a8083063 Iustin Pop
          val = instance.nics[0].mac
2737 644eeef9 Iustin Pop
        elif field == "sda_size" or field == "sdb_size":
2738 644eeef9 Iustin Pop
          disk = instance.FindDisk(field[:3])
2739 644eeef9 Iustin Pop
          if disk is None:
2740 8a23d2d3 Iustin Pop
            val = None
2741 644eeef9 Iustin Pop
          else:
2742 644eeef9 Iustin Pop
            val = disk.size
2743 d6d415e8 Iustin Pop
        elif field == "vcpus":
2744 d6d415e8 Iustin Pop
          val = instance.vcpus
2745 a8083063 Iustin Pop
        else:
2746 3ecf6786 Iustin Pop
          raise errors.ParameterError(field)
2747 a8083063 Iustin Pop
        iout.append(val)
2748 a8083063 Iustin Pop
      output.append(iout)
2749 a8083063 Iustin Pop
2750 a8083063 Iustin Pop
    return output
2751 a8083063 Iustin Pop
2752 a8083063 Iustin Pop
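# Illustrative note (not part of the original module; values are hypothetical):
# with output_fields ["name", "pnode", "status", "oper_ram"] the Exec method
# above returns one row per instance, in the order of the requested fields, e.g.
#   [["instance1.example.com", "node1.example.com", "running", 512],
#    ["instance2.example.com", "node2.example.com", "ADMIN_down", "-"]]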
2753 a8083063 Iustin Pop
class LUFailoverInstance(LogicalUnit):
2754 a8083063 Iustin Pop
  """Failover an instance.
2755 a8083063 Iustin Pop

2756 a8083063 Iustin Pop
  """
2757 a8083063 Iustin Pop
  HPATH = "instance-failover"
2758 a8083063 Iustin Pop
  HTYPE = constants.HTYPE_INSTANCE
2759 a8083063 Iustin Pop
  _OP_REQP = ["instance_name", "ignore_consistency"]
2760 a8083063 Iustin Pop
2761 a8083063 Iustin Pop
  def BuildHooksEnv(self):
2762 a8083063 Iustin Pop
    """Build hooks env.
2763 a8083063 Iustin Pop

2764 a8083063 Iustin Pop
    This runs on master, primary and secondary nodes of the instance.
2765 a8083063 Iustin Pop

2766 a8083063 Iustin Pop
    """
2767 a8083063 Iustin Pop
    env = {
2768 a8083063 Iustin Pop
      "IGNORE_CONSISTENCY": self.op.ignore_consistency,
2769 a8083063 Iustin Pop
      }
2770 396e1b78 Michael Hanselmann
    env.update(_BuildInstanceHookEnvByObject(self.instance))
2771 880478f8 Iustin Pop
    nl = [self.sstore.GetMasterNode()] + list(self.instance.secondary_nodes)
2772 a8083063 Iustin Pop
    return env, nl, nl
2773 a8083063 Iustin Pop
2774 a8083063 Iustin Pop
  def CheckPrereq(self):
2775 a8083063 Iustin Pop
    """Check prerequisites.
2776 a8083063 Iustin Pop

2777 a8083063 Iustin Pop
    This checks that the instance is in the cluster.
2778 a8083063 Iustin Pop

2779 a8083063 Iustin Pop
    """
2780 a8083063 Iustin Pop
    instance = self.cfg.GetInstanceInfo(
2781 a8083063 Iustin Pop
      self.cfg.ExpandInstanceName(self.op.instance_name))
2782 a8083063 Iustin Pop
    if instance is None:
2783 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("Instance '%s' not known" %
2784 3ecf6786 Iustin Pop
                                 self.op.instance_name)
2785 a8083063 Iustin Pop
2786 a1f445d3 Iustin Pop
    if instance.disk_template not in constants.DTS_NET_MIRROR:
2787 2a710df1 Michael Hanselmann
      raise errors.OpPrereqError("Instance's disk layout is not"
2788 a1f445d3 Iustin Pop
                                 " network mirrored, cannot failover.")
2789 2a710df1 Michael Hanselmann
2790 2a710df1 Michael Hanselmann
    secondary_nodes = instance.secondary_nodes
2791 2a710df1 Michael Hanselmann
    if not secondary_nodes:
2792 2a710df1 Michael Hanselmann
      raise errors.ProgrammerError("no secondary node but using "
2793 2a710df1 Michael Hanselmann
                                   "DT_REMOTE_RAID1 template")
2794 2a710df1 Michael Hanselmann
2795 2a710df1 Michael Hanselmann
    target_node = secondary_nodes[0]
2796 d4f16fd9 Iustin Pop
    # check memory requirements on the secondary node
2797 d4f16fd9 Iustin Pop
    _CheckNodeFreeMemory(self.cfg, target_node, "failing over instance %s" %
2798 d4f16fd9 Iustin Pop
                         instance.name, instance.memory)
2799 3a7c308e Guido Trotter
2800 a8083063 Iustin Pop
    # check bridge existence
2801 a8083063 Iustin Pop
    brlist = [nic.bridge for nic in instance.nics]
2802 50ff9a7a Iustin Pop
    if not rpc.call_bridges_exist(target_node, brlist):
2803 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("One or more target bridges %s does not"
2804 3ecf6786 Iustin Pop
                                 " exist on destination node '%s'" %
2805 50ff9a7a Iustin Pop
                                 (brlist, target_node))
2806 a8083063 Iustin Pop
2807 a8083063 Iustin Pop
    self.instance = instance
2808 a8083063 Iustin Pop
2809 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
2810 a8083063 Iustin Pop
    """Failover an instance.
2811 a8083063 Iustin Pop

2812 a8083063 Iustin Pop
    The failover is done by shutting it down on its present node and
2813 a8083063 Iustin Pop
    starting it on the secondary.
2814 a8083063 Iustin Pop

2815 a8083063 Iustin Pop
    """
2816 a8083063 Iustin Pop
    instance = self.instance
2817 a8083063 Iustin Pop
2818 a8083063 Iustin Pop
    source_node = instance.primary_node
2819 a8083063 Iustin Pop
    target_node = instance.secondary_nodes[0]
2820 a8083063 Iustin Pop
2821 a8083063 Iustin Pop
    feedback_fn("* checking disk consistency between source and target")
2822 a8083063 Iustin Pop
    for dev in instance.disks:
2823 a8083063 Iustin Pop
      # for remote_raid1, these are md over drbd
2824 a8083063 Iustin Pop
      if not _CheckDiskConsistency(self.cfg, dev, target_node, False):
2825 a0aaa0d0 Guido Trotter
        if instance.status == "up" and not self.op.ignore_consistency:
2826 3ecf6786 Iustin Pop
          raise errors.OpExecError("Disk %s is degraded on target node,"
2827 3ecf6786 Iustin Pop
                                   " aborting failover." % dev.iv_name)
2828 a8083063 Iustin Pop
2829 a8083063 Iustin Pop
    feedback_fn("* shutting down instance on source node")
2830 a8083063 Iustin Pop
    logger.Info("Shutting down instance %s on node %s" %
2831 a8083063 Iustin Pop
                (instance.name, source_node))
2832 a8083063 Iustin Pop
2833 a8083063 Iustin Pop
    if not rpc.call_instance_shutdown(source_node, instance):
2834 24a40d57 Iustin Pop
      if self.op.ignore_consistency:
2835 24a40d57 Iustin Pop
        logger.Error("Could not shutdown instance %s on node %s. Proceeding"
2836 24a40d57 Iustin Pop
                     " anyway. Please make sure node %s is down"  %
2837 24a40d57 Iustin Pop
                     (instance.name, source_node, source_node))
2838 24a40d57 Iustin Pop
      else:
2839 24a40d57 Iustin Pop
        raise errors.OpExecError("Could not shutdown instance %s on node %s" %
2840 24a40d57 Iustin Pop
                                 (instance.name, source_node))
2841 a8083063 Iustin Pop
2842 a8083063 Iustin Pop
    feedback_fn("* deactivating the instance's disks on source node")
2843 a8083063 Iustin Pop
    if not _ShutdownInstanceDisks(instance, self.cfg, ignore_primary=True):
2844 3ecf6786 Iustin Pop
      raise errors.OpExecError("Can't shut down the instance's disks.")
2845 a8083063 Iustin Pop
2846 a8083063 Iustin Pop
    instance.primary_node = target_node
2847 a8083063 Iustin Pop
    # distribute new instance config to the other nodes
2848 a8083063 Iustin Pop
    self.cfg.AddInstance(instance)
2849 a8083063 Iustin Pop
2850 12a0cfbe Guido Trotter
    # Only start the instance if it's marked as up
2851 12a0cfbe Guido Trotter
    if instance.status == "up":
2852 12a0cfbe Guido Trotter
      feedback_fn("* activating the instance's disks on target node")
2853 12a0cfbe Guido Trotter
      logger.Info("Starting instance %s on node %s" %
2854 12a0cfbe Guido Trotter
                  (instance.name, target_node))
2855 12a0cfbe Guido Trotter
2856 12a0cfbe Guido Trotter
      disks_ok, dummy = _AssembleInstanceDisks(instance, self.cfg,
2857 12a0cfbe Guido Trotter
                                               ignore_secondaries=True)
2858 12a0cfbe Guido Trotter
      if not disks_ok:
2859 12a0cfbe Guido Trotter
        _ShutdownInstanceDisks(instance, self.cfg)
2860 12a0cfbe Guido Trotter
        raise errors.OpExecError("Can't activate the instance's disks")
2861 a8083063 Iustin Pop
2862 12a0cfbe Guido Trotter
      feedback_fn("* starting the instance on the target node")
2863 12a0cfbe Guido Trotter
      if not rpc.call_instance_start(target_node, instance, None):
2864 12a0cfbe Guido Trotter
        _ShutdownInstanceDisks(instance, self.cfg)
2865 12a0cfbe Guido Trotter
        raise errors.OpExecError("Could not start instance %s on node %s." %
2866 12a0cfbe Guido Trotter
                                 (instance.name, target_node))
2867 a8083063 Iustin Pop
2868 a8083063 Iustin Pop
2869 3f78eef2 Iustin Pop
def _CreateBlockDevOnPrimary(cfg, node, instance, device, info):
2870 a8083063 Iustin Pop
  """Create a tree of block devices on the primary node.
2871 a8083063 Iustin Pop

2872 a8083063 Iustin Pop
  This always creates all devices.
2873 a8083063 Iustin Pop

2874 a8083063 Iustin Pop
  """
2875 a8083063 Iustin Pop
  if device.children:
2876 a8083063 Iustin Pop
    for child in device.children:
2877 3f78eef2 Iustin Pop
      if not _CreateBlockDevOnPrimary(cfg, node, instance, child, info):
2878 a8083063 Iustin Pop
        return False
2879 a8083063 Iustin Pop
2880 a8083063 Iustin Pop
  cfg.SetDiskID(device, node)
2881 3f78eef2 Iustin Pop
  new_id = rpc.call_blockdev_create(node, device, device.size,
2882 3f78eef2 Iustin Pop
                                    instance.name, True, info)
2883 a8083063 Iustin Pop
  if not new_id:
2884 a8083063 Iustin Pop
    return False
2885 a8083063 Iustin Pop
  if device.physical_id is None:
2886 a8083063 Iustin Pop
    device.physical_id = new_id
2887 a8083063 Iustin Pop
  return True
2888 a8083063 Iustin Pop
2889 a8083063 Iustin Pop
2890 3f78eef2 Iustin Pop
def _CreateBlockDevOnSecondary(cfg, node, instance, device, force, info):
2891 a8083063 Iustin Pop
  """Create a tree of block devices on a secondary node.
2892 a8083063 Iustin Pop

2893 a8083063 Iustin Pop
  If this device type has to be created on secondaries, create it and
2894 a8083063 Iustin Pop
  all its children.
2895 a8083063 Iustin Pop

2896 a8083063 Iustin Pop
  If not, just recurse to children keeping the same 'force' value.
2897 a8083063 Iustin Pop

2898 a8083063 Iustin Pop
  """
2899 a8083063 Iustin Pop
  if device.CreateOnSecondary():
2900 a8083063 Iustin Pop
    force = True
2901 a8083063 Iustin Pop
  if device.children:
2902 a8083063 Iustin Pop
    for child in device.children:
2903 3f78eef2 Iustin Pop
      if not _CreateBlockDevOnSecondary(cfg, node, instance,
2904 3f78eef2 Iustin Pop
                                        child, force, info):
2905 a8083063 Iustin Pop
        return False
2906 a8083063 Iustin Pop
2907 a8083063 Iustin Pop
  if not force:
2908 a8083063 Iustin Pop
    return True
2909 a8083063 Iustin Pop
  cfg.SetDiskID(device, node)
2910 3f78eef2 Iustin Pop
  new_id = rpc.call_blockdev_create(node, device, device.size,
2911 3f78eef2 Iustin Pop
                                    instance.name, False, info)
2912 a8083063 Iustin Pop
  if not new_id:
2913 a8083063 Iustin Pop
    return False
2914 a8083063 Iustin Pop
  if device.physical_id is None:
2915 a8083063 Iustin Pop
    device.physical_id = new_id
2916 a8083063 Iustin Pop
  return True
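# Illustrative note (not part of the original file): _CreateDisks below uses
# these two helpers together for every instance disk; secondaries are handled
# with force=False, so only device types whose CreateOnSecondary() is true
# (plus their children) are actually created there, e.g.:
#
#   for secondary_node in instance.secondary_nodes:
#     _CreateBlockDevOnSecondary(cfg, secondary_node, instance, device,
#                                False, info)
#   _CreateBlockDevOnPrimary(cfg, instance.primary_node, instance, device,
#                            info)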
2917 a8083063 Iustin Pop
2918 a8083063 Iustin Pop
2919 923b1523 Iustin Pop
def _GenerateUniqueNames(cfg, exts):
2920 923b1523 Iustin Pop
  """Generate a suitable LV name.
2921 923b1523 Iustin Pop

2922 923b1523 Iustin Pop
  This will generate a unique logical volume name for each given suffix.
2923 923b1523 Iustin Pop

2924 923b1523 Iustin Pop
  """
2925 923b1523 Iustin Pop
  results = []
2926 923b1523 Iustin Pop
  for val in exts:
2927 923b1523 Iustin Pop
    new_id = cfg.GenerateUniqueID()
2928 923b1523 Iustin Pop
    results.append("%s%s" % (new_id, val))
2929 923b1523 Iustin Pop
  return results
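# Illustrative example (not part of the original file), assuming
# cfg.GenerateUniqueID() returns a UUID-like string:
#   _GenerateUniqueNames(cfg, [".sda", ".sdb"])
# could return something like
#   ["9e612d9a-...-c39bf0f5.sda", "9c6b2d3f-...-a1e5b8d7.sdb"]
# i.e. one unique LV name per requested suffix.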
2930 923b1523 Iustin Pop
2931 923b1523 Iustin Pop
2932 923b1523 Iustin Pop
def _GenerateMDDRBDBranch(cfg, primary, secondary, size, names):
2933 a8083063 Iustin Pop
  """Generate a drbd device complete with its children.
2934 a8083063 Iustin Pop

2935 a8083063 Iustin Pop
  """
2936 a8083063 Iustin Pop
  port = cfg.AllocatePort()
2937 923b1523 Iustin Pop
  vgname = cfg.GetVGName()
2938 fe96220b Iustin Pop
  dev_data = objects.Disk(dev_type=constants.LD_LV, size=size,
2939 923b1523 Iustin Pop
                          logical_id=(vgname, names[0]))
2940 fe96220b Iustin Pop
  dev_meta = objects.Disk(dev_type=constants.LD_LV, size=128,
2941 923b1523 Iustin Pop
                          logical_id=(vgname, names[1]))
2942 fe96220b Iustin Pop
  drbd_dev = objects.Disk(dev_type=constants.LD_DRBD7, size=size,
2943 a8083063 Iustin Pop
                          logical_id = (primary, secondary, port),
2944 a8083063 Iustin Pop
                          children = [dev_data, dev_meta])
2945 a8083063 Iustin Pop
  return drbd_dev
2946 a8083063 Iustin Pop
2947 a8083063 Iustin Pop
2948 a1f445d3 Iustin Pop
def _GenerateDRBD8Branch(cfg, primary, secondary, size, names, iv_name):
2949 a1f445d3 Iustin Pop
  """Generate a drbd8 device complete with its children.
2950 a1f445d3 Iustin Pop

2951 a1f445d3 Iustin Pop
  """
2952 a1f445d3 Iustin Pop
  port = cfg.AllocatePort()
2953 a1f445d3 Iustin Pop
  vgname = cfg.GetVGName()
2954 a1f445d3 Iustin Pop
  dev_data = objects.Disk(dev_type=constants.LD_LV, size=size,
2955 a1f445d3 Iustin Pop
                          logical_id=(vgname, names[0]))
2956 a1f445d3 Iustin Pop
  dev_meta = objects.Disk(dev_type=constants.LD_LV, size=128,
2957 a1f445d3 Iustin Pop
                          logical_id=(vgname, names[1]))
2958 a1f445d3 Iustin Pop
  drbd_dev = objects.Disk(dev_type=constants.LD_DRBD8, size=size,
2959 a1f445d3 Iustin Pop
                          logical_id = (primary, secondary, port),
2960 a1f445d3 Iustin Pop
                          children = [dev_data, dev_meta],
2961 a1f445d3 Iustin Pop
                          iv_name=iv_name)
2962 a1f445d3 Iustin Pop
  return drbd_dev
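# Illustrative sketch (not part of the original file) of the device tree the
# function above builds for a 10240 MB "sda" disk (names as produced by
# _GenerateUniqueNames):
#
#   LD_DRBD8  size=10240  iv_name="sda"  logical_id=(primary, secondary, port)
#     +- LD_LV  size=10240  logical_id=(vgname, names[0])   # data volume
#     +- LD_LV  size=128    logical_id=(vgname, names[1])   # drbd metadata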
2963 a1f445d3 Iustin Pop
2964 7c0d6283 Michael Hanselmann
2965 923b1523 Iustin Pop
def _GenerateDiskTemplate(cfg, template_name,
2966 a8083063 Iustin Pop
                          instance_name, primary_node,
2967 0f1a06e3 Manuel Franceschini
                          secondary_nodes, disk_sz, swap_sz,
2968 0f1a06e3 Manuel Franceschini
                          file_storage_dir, file_driver):
2969 a8083063 Iustin Pop
  """Generate the entire disk layout for a given template type.
2970 a8083063 Iustin Pop

2971 a8083063 Iustin Pop
  """
2972 a8083063 Iustin Pop
  #TODO: compute space requirements
2973 a8083063 Iustin Pop
2974 923b1523 Iustin Pop
  vgname = cfg.GetVGName()
2975 3517d9b9 Manuel Franceschini
  if template_name == constants.DT_DISKLESS:
2976 a8083063 Iustin Pop
    disks = []
2977 3517d9b9 Manuel Franceschini
  elif template_name == constants.DT_PLAIN:
2978 a8083063 Iustin Pop
    if len(secondary_nodes) != 0:
2979 a8083063 Iustin Pop
      raise errors.ProgrammerError("Wrong template configuration")
2980 923b1523 Iustin Pop
2981 923b1523 Iustin Pop
    names = _GenerateUniqueNames(cfg, [".sda", ".sdb"])
2982 fe96220b Iustin Pop
    sda_dev = objects.Disk(dev_type=constants.LD_LV, size=disk_sz,
2983 923b1523 Iustin Pop
                           logical_id=(vgname, names[0]),
2984 a8083063 Iustin Pop
                           iv_name = "sda")
2985 fe96220b Iustin Pop
    sdb_dev = objects.Disk(dev_type=constants.LD_LV, size=swap_sz,
2986 923b1523 Iustin Pop
                           logical_id=(vgname, names[1]),
2987 a8083063 Iustin Pop
                           iv_name = "sdb")
2988 a8083063 Iustin Pop
    disks = [sda_dev, sdb_dev]
2989 a1f445d3 Iustin Pop
  elif template_name == constants.DT_DRBD8:
2990 a1f445d3 Iustin Pop
    if len(secondary_nodes) != 1:
2991 a1f445d3 Iustin Pop
      raise errors.ProgrammerError("Wrong template configuration")
2992 a1f445d3 Iustin Pop
    remote_node = secondary_nodes[0]
2993 a1f445d3 Iustin Pop
    names = _GenerateUniqueNames(cfg, [".sda_data", ".sda_meta",
2994 a1f445d3 Iustin Pop
                                       ".sdb_data", ".sdb_meta"])
2995 a1f445d3 Iustin Pop
    drbd_sda_dev = _GenerateDRBD8Branch(cfg, primary_node, remote_node,
2996 a1f445d3 Iustin Pop
                                         disk_sz, names[0:2], "sda")
2997 a1f445d3 Iustin Pop
    drbd_sdb_dev = _GenerateDRBD8Branch(cfg, primary_node, remote_node,
2998 a1f445d3 Iustin Pop
                                         swap_sz, names[2:4], "sdb")
2999 a1f445d3 Iustin Pop
    disks = [drbd_sda_dev, drbd_sdb_dev]
3000 0f1a06e3 Manuel Franceschini
  elif template_name == constants.DT_FILE:
3001 0f1a06e3 Manuel Franceschini
    if len(secondary_nodes) != 0:
3002 0f1a06e3 Manuel Franceschini
      raise errors.ProgrammerError("Wrong template configuration")
3003 0f1a06e3 Manuel Franceschini
3004 0f1a06e3 Manuel Franceschini
    file_sda_dev = objects.Disk(dev_type=constants.LD_FILE, size=disk_sz,
3005 0f1a06e3 Manuel Franceschini
                                iv_name="sda", logical_id=(file_driver,
3006 0f1a06e3 Manuel Franceschini
                                "%s/sda" % file_storage_dir))
3007 0f1a06e3 Manuel Franceschini
    file_sdb_dev = objects.Disk(dev_type=constants.LD_FILE, size=swap_sz,
3008 0f1a06e3 Manuel Franceschini
                                iv_name="sdb", logical_id=(file_driver,
3009 0f1a06e3 Manuel Franceschini
                                "%s/sdb" % file_storage_dir))
3010 0f1a06e3 Manuel Franceschini
    disks = [file_sda_dev, file_sdb_dev]
3011 a8083063 Iustin Pop
  else:
3012 a8083063 Iustin Pop
    raise errors.ProgrammerError("Invalid disk template '%s'" % template_name)
3013 a8083063 Iustin Pop
  return disks
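# Illustrative example (not part of the original file): for the drbd8 template
# the call made from LUCreateInstance.Exec below amounts to something like
#   _GenerateDiskTemplate(cfg, constants.DT_DRBD8, "inst1.example.com",
#                         "node1", ["node2"], 10240, 4096, "", None)
# which returns two DRBD8 devices, iv_name "sda" (data) and "sdb" (swap),
# each backed by a data LV and a 128 MB metadata LV.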
3014 a8083063 Iustin Pop
3015 a8083063 Iustin Pop
3016 a0c3fea1 Michael Hanselmann
def _GetInstanceInfoText(instance):
3017 3ecf6786 Iustin Pop
  """Compute that text that should be added to the disk's metadata.
3018 3ecf6786 Iustin Pop

3019 3ecf6786 Iustin Pop
  """
3020 a0c3fea1 Michael Hanselmann
  return "originstname+%s" % instance.name
3021 a0c3fea1 Michael Hanselmann
3022 a0c3fea1 Michael Hanselmann
3023 a8083063 Iustin Pop
def _CreateDisks(cfg, instance):
3024 a8083063 Iustin Pop
  """Create all disks for an instance.
3025 a8083063 Iustin Pop

3026 a8083063 Iustin Pop
  This abstracts away some work from AddInstance.
3027 a8083063 Iustin Pop

3028 a8083063 Iustin Pop
  Args:
3029 a8083063 Iustin Pop
    instance: the instance object
3030 a8083063 Iustin Pop

3031 a8083063 Iustin Pop
  Returns:
3032 a8083063 Iustin Pop
    True or False showing the success of the creation process
3033 a8083063 Iustin Pop

3034 a8083063 Iustin Pop
  """
3035 a0c3fea1 Michael Hanselmann
  info = _GetInstanceInfoText(instance)
3036 a0c3fea1 Michael Hanselmann
3037 0f1a06e3 Manuel Franceschini
  if instance.disk_template == constants.DT_FILE:
3038 0f1a06e3 Manuel Franceschini
    file_storage_dir = os.path.dirname(instance.disks[0].logical_id[1])
3039 0f1a06e3 Manuel Franceschini
    result = rpc.call_file_storage_dir_create(instance.primary_node,
3040 0f1a06e3 Manuel Franceschini
                                              file_storage_dir)
3041 0f1a06e3 Manuel Franceschini
3042 0f1a06e3 Manuel Franceschini
    if not result:
3043 b62ddbe5 Guido Trotter
      logger.Error("Could not connect to node '%s'" % instance.primary_node)
3044 0f1a06e3 Manuel Franceschini
      return False
3045 0f1a06e3 Manuel Franceschini
3046 0f1a06e3 Manuel Franceschini
    if not result[0]:
3047 0f1a06e3 Manuel Franceschini
      logger.Error("failed to create directory '%s'" % file_storage_dir)
3048 0f1a06e3 Manuel Franceschini
      return False
3049 0f1a06e3 Manuel Franceschini
3050 a8083063 Iustin Pop
  for device in instance.disks:
3051 a8083063 Iustin Pop
    logger.Info("creating volume %s for instance %s" %
3052 1c6e3627 Manuel Franceschini
                (device.iv_name, instance.name))
3053 a8083063 Iustin Pop
    #HARDCODE
3054 a8083063 Iustin Pop
    for secondary_node in instance.secondary_nodes:
3055 3f78eef2 Iustin Pop
      if not _CreateBlockDevOnSecondary(cfg, secondary_node, instance,
3056 3f78eef2 Iustin Pop
                                        device, False, info):
3057 a8083063 Iustin Pop
        logger.Error("failed to create volume %s (%s) on secondary node %s!" %
3058 a8083063 Iustin Pop
                     (device.iv_name, device, secondary_node))
3059 a8083063 Iustin Pop
        return False
3060 a8083063 Iustin Pop
    #HARDCODE
3061 3f78eef2 Iustin Pop
    if not _CreateBlockDevOnPrimary(cfg, instance.primary_node,
3062 3f78eef2 Iustin Pop
                                    instance, device, info):
3063 a8083063 Iustin Pop
      logger.Error("failed to create volume %s on primary!" %
3064 a8083063 Iustin Pop
                   device.iv_name)
3065 a8083063 Iustin Pop
      return False
3066 1c6e3627 Manuel Franceschini
3067 a8083063 Iustin Pop
  return True
3068 a8083063 Iustin Pop
3069 a8083063 Iustin Pop
3070 a8083063 Iustin Pop
def _RemoveDisks(instance, cfg):
3071 a8083063 Iustin Pop
  """Remove all disks for an instance.
3072 a8083063 Iustin Pop

3073 a8083063 Iustin Pop
  This abstracts away some work from `AddInstance()` and
3074 a8083063 Iustin Pop
  `RemoveInstance()`. Note that in case some of the devices couldn't
3075 1d67656e Iustin Pop
  be removed, the removal will continue with the other ones (compare
3076 a8083063 Iustin Pop
  with `_CreateDisks()`).
3077 a8083063 Iustin Pop

3078 a8083063 Iustin Pop
  Args:
3079 a8083063 Iustin Pop
    instance: the instance object
3080 a8083063 Iustin Pop

3081 a8083063 Iustin Pop
  Returns:
3082 a8083063 Iustin Pop
    True or False showing the success of the removal process
3083 a8083063 Iustin Pop

3084 a8083063 Iustin Pop
  """
3085 a8083063 Iustin Pop
  logger.Info("removing block devices for instance %s" % instance.name)
3086 a8083063 Iustin Pop
3087 a8083063 Iustin Pop
  result = True
3088 a8083063 Iustin Pop
  for device in instance.disks:
3089 a8083063 Iustin Pop
    for node, disk in device.ComputeNodeTree(instance.primary_node):
3090 a8083063 Iustin Pop
      cfg.SetDiskID(disk, node)
3091 a8083063 Iustin Pop
      if not rpc.call_blockdev_remove(node, disk):
3092 a8083063 Iustin Pop
        logger.Error("could not remove block device %s on node %s,"
3093 a8083063 Iustin Pop
                     " continuing anyway" %
3094 a8083063 Iustin Pop
                     (device.iv_name, node))
3095 a8083063 Iustin Pop
        result = False
3096 0f1a06e3 Manuel Franceschini
3097 0f1a06e3 Manuel Franceschini
  if instance.disk_template == constants.DT_FILE:
3098 0f1a06e3 Manuel Franceschini
    file_storage_dir = os.path.dirname(instance.disks[0].logical_id[1])
3099 0f1a06e3 Manuel Franceschini
    if not rpc.call_file_storage_dir_remove(instance.primary_node,
3100 0f1a06e3 Manuel Franceschini
                                            file_storage_dir):
3101 0f1a06e3 Manuel Franceschini
      logger.Error("could not remove directory '%s'" % file_storage_dir)
3102 0f1a06e3 Manuel Franceschini
      result = False
3103 0f1a06e3 Manuel Franceschini
3104 a8083063 Iustin Pop
  return result
3105 a8083063 Iustin Pop
3106 a8083063 Iustin Pop
3107 e2fe6369 Iustin Pop
def _ComputeDiskSize(disk_template, disk_size, swap_size):
3108 e2fe6369 Iustin Pop
  """Compute disk size requirements in the volume group
3109 e2fe6369 Iustin Pop

3110 e2fe6369 Iustin Pop
  This is currently hard-coded for the two-drive layout.
3111 e2fe6369 Iustin Pop

3112 e2fe6369 Iustin Pop
  """
3113 e2fe6369 Iustin Pop
  # Required free disk space as a function of disk and swap space
3114 e2fe6369 Iustin Pop
  req_size_dict = {
3115 e2fe6369 Iustin Pop
    constants.DT_DISKLESS: None,
3116 e2fe6369 Iustin Pop
    constants.DT_PLAIN: disk_size + swap_size,
3117 e2fe6369 Iustin Pop
    # 256 MB are added for drbd metadata, 128 MB for each drbd device
3118 e2fe6369 Iustin Pop
    constants.DT_DRBD8: disk_size + swap_size + 256,
3119 e2fe6369 Iustin Pop
    constants.DT_FILE: None,
3120 e2fe6369 Iustin Pop
  }
3121 e2fe6369 Iustin Pop
3122 e2fe6369 Iustin Pop
  if disk_template not in req_size_dict:
3123 e2fe6369 Iustin Pop
    raise errors.ProgrammerError("Disk template '%s' size requirement"
3124 e2fe6369 Iustin Pop
                                 " is unknown" %  disk_template)
3125 e2fe6369 Iustin Pop
3126 e2fe6369 Iustin Pop
  return req_size_dict[disk_template]
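# Illustrative example (not part of the original file): for a drbd8 instance
# with disk_size=10240 and swap_size=4096 the volume group on each node must
# hold 10240 + 4096 + 256 = 14592 MB, i.e.
#   _ComputeDiskSize(constants.DT_DRBD8, 10240, 4096) == 14592
# while the diskless and file templates need no LVM space at all (None).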
3127 e2fe6369 Iustin Pop
3128 e2fe6369 Iustin Pop
3129 a8083063 Iustin Pop
class LUCreateInstance(LogicalUnit):
3130 a8083063 Iustin Pop
  """Create an instance.
3131 a8083063 Iustin Pop

3132 a8083063 Iustin Pop
  """
3133 a8083063 Iustin Pop
  HPATH = "instance-add"
3134 a8083063 Iustin Pop
  HTYPE = constants.HTYPE_INSTANCE
3135 538475ca Iustin Pop
  _OP_REQP = ["instance_name", "mem_size", "disk_size",
3136 a8083063 Iustin Pop
              "disk_template", "swap_size", "mode", "start", "vcpus",
3137 1862d460 Alexander Schreiber
              "wait_for_sync", "ip_check", "mac"]
3138 a8083063 Iustin Pop
3139 538475ca Iustin Pop
  def _RunAllocator(self):
3140 538475ca Iustin Pop
    """Run the allocator based on input opcode.
3141 538475ca Iustin Pop

3142 538475ca Iustin Pop
    """
3143 538475ca Iustin Pop
    disks = [{"size": self.op.disk_size, "mode": "w"},
3144 538475ca Iustin Pop
             {"size": self.op.swap_size, "mode": "w"}]
3145 538475ca Iustin Pop
    nics = [{"mac": self.op.mac, "ip": getattr(self.op, "ip", None),
3146 538475ca Iustin Pop
             "bridge": self.op.bridge}]
3147 d1c2dd75 Iustin Pop
    ial = IAllocator(self.cfg, self.sstore,
3148 29859cb7 Iustin Pop
                     mode=constants.IALLOCATOR_MODE_ALLOC,
3149 d1c2dd75 Iustin Pop
                     name=self.op.instance_name,
3150 d1c2dd75 Iustin Pop
                     disk_template=self.op.disk_template,
3151 d1c2dd75 Iustin Pop
                     tags=[],
3152 d1c2dd75 Iustin Pop
                     os=self.op.os_type,
3153 d1c2dd75 Iustin Pop
                     vcpus=self.op.vcpus,
3154 d1c2dd75 Iustin Pop
                     mem_size=self.op.mem_size,
3155 d1c2dd75 Iustin Pop
                     disks=disks,
3156 d1c2dd75 Iustin Pop
                     nics=nics,
3157 29859cb7 Iustin Pop
                     )
3158 d1c2dd75 Iustin Pop
3159 d1c2dd75 Iustin Pop
    ial.Run(self.op.iallocator)
3160 d1c2dd75 Iustin Pop
3161 d1c2dd75 Iustin Pop
    if not ial.success:
3162 538475ca Iustin Pop
      raise errors.OpPrereqError("Can't compute nodes using"
3163 538475ca Iustin Pop
                                 " iallocator '%s': %s" % (self.op.iallocator,
3164 d1c2dd75 Iustin Pop
                                                           ial.info))
3165 27579978 Iustin Pop
    if len(ial.nodes) != ial.required_nodes:
3166 538475ca Iustin Pop
      raise errors.OpPrereqError("iallocator '%s' returned invalid number"
3167 538475ca Iustin Pop
                                 " of nodes (%s), required %s" %
3168 27579978 Iustin Pop
                                 (self.op.iallocator, len(ial.nodes),
                                  ial.required_nodes))
3169 d1c2dd75 Iustin Pop
    self.op.pnode = ial.nodes[0]
3170 538475ca Iustin Pop
    logger.ToStdout("Selected nodes for the instance: %s" %
3171 d1c2dd75 Iustin Pop
                    (", ".join(ial.nodes),))
3172 538475ca Iustin Pop
    logger.Info("Selected nodes for instance %s via iallocator %s: %s" %
3173 d1c2dd75 Iustin Pop
                (self.op.instance_name, self.op.iallocator, ial.nodes))
3174 27579978 Iustin Pop
    if ial.required_nodes == 2:
3175 d1c2dd75 Iustin Pop
      self.op.snode = ial.nodes[1]
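    # Illustrative note (not part of the original file): for a template that
    # needs a mirror node (e.g. drbd8) the allocator must return two nodes, so
    # ial.nodes might look like ["node1.example.com", "node2.example.com"];
    # the first becomes the primary (pnode) and the second the secondary
    # (snode).  A non-mirrored template needs only the single primary node.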
3176 538475ca Iustin Pop
3177 a8083063 Iustin Pop
  def BuildHooksEnv(self):
3178 a8083063 Iustin Pop
    """Build hooks env.
3179 a8083063 Iustin Pop

3180 a8083063 Iustin Pop
    This runs on master, primary and secondary nodes of the instance.
3181 a8083063 Iustin Pop

3182 a8083063 Iustin Pop
    """
3183 a8083063 Iustin Pop
    env = {
3184 396e1b78 Michael Hanselmann
      "INSTANCE_DISK_TEMPLATE": self.op.disk_template,
3185 396e1b78 Michael Hanselmann
      "INSTANCE_DISK_SIZE": self.op.disk_size,
3186 396e1b78 Michael Hanselmann
      "INSTANCE_SWAP_SIZE": self.op.swap_size,
3187 a8083063 Iustin Pop
      "INSTANCE_ADD_MODE": self.op.mode,
3188 a8083063 Iustin Pop
      }
3189 a8083063 Iustin Pop
    if self.op.mode == constants.INSTANCE_IMPORT:
3190 396e1b78 Michael Hanselmann
      env["INSTANCE_SRC_NODE"] = self.op.src_node
3191 396e1b78 Michael Hanselmann
      env["INSTANCE_SRC_PATH"] = self.op.src_path
3192 396e1b78 Michael Hanselmann
      env["INSTANCE_SRC_IMAGE"] = self.src_image
3193 396e1b78 Michael Hanselmann
3194 396e1b78 Michael Hanselmann
    env.update(_BuildInstanceHookEnv(name=self.op.instance_name,
3195 396e1b78 Michael Hanselmann
      primary_node=self.op.pnode,
3196 396e1b78 Michael Hanselmann
      secondary_nodes=self.secondaries,
3197 396e1b78 Michael Hanselmann
      status=self.instance_status,
3198 ecb215b5 Michael Hanselmann
      os_type=self.op.os_type,
3199 396e1b78 Michael Hanselmann
      memory=self.op.mem_size,
3200 396e1b78 Michael Hanselmann
      vcpus=self.op.vcpus,
3201 c7b27e9e Iustin Pop
      nics=[(self.inst_ip, self.op.bridge, self.op.mac)],
3202 396e1b78 Michael Hanselmann
    ))
3203 a8083063 Iustin Pop
3204 880478f8 Iustin Pop
    nl = ([self.sstore.GetMasterNode(), self.op.pnode] +
3205 a8083063 Iustin Pop
          self.secondaries)
3206 a8083063 Iustin Pop
    return env, nl, nl
3207 a8083063 Iustin Pop
3208 a8083063 Iustin Pop
3209 a8083063 Iustin Pop
  def CheckPrereq(self):
3210 a8083063 Iustin Pop
    """Check prerequisites.
3211 a8083063 Iustin Pop

3212 a8083063 Iustin Pop
    """
3213 538475ca Iustin Pop
    # set optional parameters to none if they don't exist
3214 538475ca Iustin Pop
    for attr in ["kernel_path", "initrd_path", "hvm_boot_order", "pnode",
3215 538475ca Iustin Pop
                 "iallocator"]:
3216 40ed12dd Guido Trotter
      if not hasattr(self.op, attr):
3217 40ed12dd Guido Trotter
        setattr(self.op, attr, None)
3218 40ed12dd Guido Trotter
3219 a8083063 Iustin Pop
    if self.op.mode not in (constants.INSTANCE_CREATE,
3220 a8083063 Iustin Pop
                            constants.INSTANCE_IMPORT):
3221 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("Invalid instance creation mode '%s'" %
3222 3ecf6786 Iustin Pop
                                 self.op.mode)
3223 a8083063 Iustin Pop
3224 eedc99de Manuel Franceschini
    if (not self.cfg.GetVGName() and
3225 eedc99de Manuel Franceschini
        self.op.disk_template not in constants.DTS_NOT_LVM):
3226 eedc99de Manuel Franceschini
      raise errors.OpPrereqError("Cluster does not support lvm-based"
3227 eedc99de Manuel Franceschini
                                 " instances")
3228 eedc99de Manuel Franceschini
3229 a8083063 Iustin Pop
    if self.op.mode == constants.INSTANCE_IMPORT:
3230 a8083063 Iustin Pop
      src_node = getattr(self.op, "src_node", None)
3231 a8083063 Iustin Pop
      src_path = getattr(self.op, "src_path", None)
3232 a8083063 Iustin Pop
      if src_node is None or src_path is None:
3233 3ecf6786 Iustin Pop
        raise errors.OpPrereqError("Importing an instance requires source"
3234 3ecf6786 Iustin Pop
                                   " node and path options")
3235 a8083063 Iustin Pop
      src_node_full = self.cfg.ExpandNodeName(src_node)
3236 a8083063 Iustin Pop
      if src_node_full is None:
3237 3ecf6786 Iustin Pop
        raise errors.OpPrereqError("Unknown source node '%s'" % src_node)
3238 a8083063 Iustin Pop
      self.op.src_node = src_node = src_node_full
3239 a8083063 Iustin Pop
3240 a8083063 Iustin Pop
      if not os.path.isabs(src_path):
3241 3ecf6786 Iustin Pop
        raise errors.OpPrereqError("The source path must be absolute")
3242 a8083063 Iustin Pop
3243 a8083063 Iustin Pop
      export_info = rpc.call_export_info(src_node, src_path)
3244 a8083063 Iustin Pop
3245 a8083063 Iustin Pop
      if not export_info:
3246 3ecf6786 Iustin Pop
        raise errors.OpPrereqError("No export found in dir %s" % src_path)
3247 a8083063 Iustin Pop
3248 a8083063 Iustin Pop
      if not export_info.has_section(constants.INISECT_EXP):
3249 3ecf6786 Iustin Pop
        raise errors.ProgrammerError("Corrupted export config")
3250 a8083063 Iustin Pop
3251 a8083063 Iustin Pop
      ei_version = export_info.get(constants.INISECT_EXP, 'version')
3252 a8083063 Iustin Pop
      if (int(ei_version) != constants.EXPORT_VERSION):
3253 3ecf6786 Iustin Pop
        raise errors.OpPrereqError("Wrong export version %s (wanted %d)" %
3254 3ecf6786 Iustin Pop
                                   (ei_version, constants.EXPORT_VERSION))
3255 a8083063 Iustin Pop
3256 a8083063 Iustin Pop
      if int(export_info.get(constants.INISECT_INS, 'disk_count')) > 1:
3257 3ecf6786 Iustin Pop
        raise errors.OpPrereqError("Can't import instance with more than"
3258 3ecf6786 Iustin Pop
                                   " one data disk")
3259 a8083063 Iustin Pop
3260 a8083063 Iustin Pop
      # FIXME: are the old os-es, disk sizes, etc. useful?
3261 a8083063 Iustin Pop
      self.op.os_type = export_info.get(constants.INISECT_EXP, 'os')
3262 a8083063 Iustin Pop
      diskimage = os.path.join(src_path, export_info.get(constants.INISECT_INS,
3263 a8083063 Iustin Pop
                                                         'disk0_dump'))
3264 a8083063 Iustin Pop
      self.src_image = diskimage
3265 a8083063 Iustin Pop
    else: # INSTANCE_CREATE
3266 a8083063 Iustin Pop
      if getattr(self.op, "os_type", None) is None:
3267 3ecf6786 Iustin Pop
        raise errors.OpPrereqError("No guest OS specified")
3268 a8083063 Iustin Pop
3269 901a65c1 Iustin Pop
    #### instance parameters check
3270 901a65c1 Iustin Pop
3271 a8083063 Iustin Pop
    # disk template and mirror node verification
3272 a8083063 Iustin Pop
    if self.op.disk_template not in constants.DISK_TEMPLATES:
3273 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("Invalid disk template name")
3274 a8083063 Iustin Pop
3275 901a65c1 Iustin Pop
    # instance name verification
3276 901a65c1 Iustin Pop
    hostname1 = utils.HostInfo(self.op.instance_name)
3277 901a65c1 Iustin Pop
3278 901a65c1 Iustin Pop
    self.op.instance_name = instance_name = hostname1.name
3279 901a65c1 Iustin Pop
    instance_list = self.cfg.GetInstanceList()
3280 901a65c1 Iustin Pop
    if instance_name in instance_list:
3281 901a65c1 Iustin Pop
      raise errors.OpPrereqError("Instance '%s' is already in the cluster" %
3282 901a65c1 Iustin Pop
                                 instance_name)
3283 901a65c1 Iustin Pop
3284 901a65c1 Iustin Pop
    # ip validity checks
3285 901a65c1 Iustin Pop
    ip = getattr(self.op, "ip", None)
3286 901a65c1 Iustin Pop
    if ip is None or ip.lower() == "none":
3287 901a65c1 Iustin Pop
      inst_ip = None
3288 901a65c1 Iustin Pop
    elif ip.lower() == "auto":
3289 901a65c1 Iustin Pop
      inst_ip = hostname1.ip
3290 901a65c1 Iustin Pop
    else:
3291 901a65c1 Iustin Pop
      if not utils.IsValidIP(ip):
3292 901a65c1 Iustin Pop
        raise errors.OpPrereqError("given IP address '%s' doesn't look"
3293 901a65c1 Iustin Pop
                                   " like a valid IP" % ip)
3294 901a65c1 Iustin Pop
      inst_ip = ip
3295 901a65c1 Iustin Pop
    self.inst_ip = self.op.ip = inst_ip
3296 901a65c1 Iustin Pop
3297 901a65c1 Iustin Pop
    if self.op.start and not self.op.ip_check:
3298 901a65c1 Iustin Pop
      raise errors.OpPrereqError("Cannot ignore IP address conflicts when"
3299 901a65c1 Iustin Pop
                                 " adding an instance in start mode")
3300 901a65c1 Iustin Pop
3301 901a65c1 Iustin Pop
    if self.op.ip_check:
3302 901a65c1 Iustin Pop
      if utils.TcpPing(hostname1.ip, constants.DEFAULT_NODED_PORT):
3303 901a65c1 Iustin Pop
        raise errors.OpPrereqError("IP %s of instance %s already in use" %
3304 901a65c1 Iustin Pop
                                   (hostname1.ip, instance_name))
3305 901a65c1 Iustin Pop
3306 901a65c1 Iustin Pop
    # MAC address verification
3307 901a65c1 Iustin Pop
    if self.op.mac != "auto":
3308 901a65c1 Iustin Pop
      if not utils.IsValidMac(self.op.mac.lower()):
3309 901a65c1 Iustin Pop
        raise errors.OpPrereqError("invalid MAC address specified: %s" %
3310 901a65c1 Iustin Pop
                                   self.op.mac)
3311 901a65c1 Iustin Pop
3312 901a65c1 Iustin Pop
    # bridge verification
3313 901a65c1 Iustin Pop
    bridge = getattr(self.op, "bridge", None)
3314 901a65c1 Iustin Pop
    if bridge is None:
3315 901a65c1 Iustin Pop
      self.op.bridge = self.cfg.GetDefBridge()
3316 901a65c1 Iustin Pop
    else:
3317 901a65c1 Iustin Pop
      self.op.bridge = bridge
3318 901a65c1 Iustin Pop
3319 901a65c1 Iustin Pop
    # boot order verification
3320 901a65c1 Iustin Pop
    if self.op.hvm_boot_order is not None:
3321 901a65c1 Iustin Pop
      if len(self.op.hvm_boot_order.strip("acdn")) != 0:
3322 901a65c1 Iustin Pop
        raise errors.OpPrereqError("invalid boot order specified,"
3323 901a65c1 Iustin Pop
                                   " must be one or more of [acdn]")
3324 901a65c1 Iustin Pop
    # file storage checks
3325 0f1a06e3 Manuel Franceschini
    if (self.op.file_driver and
3326 0f1a06e3 Manuel Franceschini
        not self.op.file_driver in constants.FILE_DRIVER):
3327 0f1a06e3 Manuel Franceschini
      raise errors.OpPrereqError("Invalid file driver name '%s'" %
3328 0f1a06e3 Manuel Franceschini
                                 self.op.file_driver)
3329 0f1a06e3 Manuel Franceschini
3330 0f1a06e3 Manuel Franceschini
    if self.op.file_storage_dir and os.path.isabs(self.op.file_storage_dir):
3331 b4de68a9 Iustin Pop
      raise errors.OpPrereqError("File storage directory not a relative"
3332 b4de68a9 Iustin Pop
                                 " path")
3333 538475ca Iustin Pop
    #### allocator run
3334 538475ca Iustin Pop
3335 538475ca Iustin Pop
    if [self.op.iallocator, self.op.pnode].count(None) != 1:
3336 538475ca Iustin Pop
      raise errors.OpPrereqError("One and only one of iallocator and primary"
3337 538475ca Iustin Pop
                                 " node must be given")
3338 538475ca Iustin Pop
3339 538475ca Iustin Pop
    if self.op.iallocator is not None:
3340 538475ca Iustin Pop
      self._RunAllocator()
3341 0f1a06e3 Manuel Franceschini
3342 901a65c1 Iustin Pop
    #### node related checks
3343 901a65c1 Iustin Pop
3344 901a65c1 Iustin Pop
    # check primary node
3345 901a65c1 Iustin Pop
    pnode = self.cfg.GetNodeInfo(self.cfg.ExpandNodeName(self.op.pnode))
3346 901a65c1 Iustin Pop
    if pnode is None:
3347 901a65c1 Iustin Pop
      raise errors.OpPrereqError("Primary node '%s' is unknown" %
3348 901a65c1 Iustin Pop
                                 self.op.pnode)
3349 901a65c1 Iustin Pop
    self.op.pnode = pnode.name
3350 901a65c1 Iustin Pop
    self.pnode = pnode
3351 901a65c1 Iustin Pop
    self.secondaries = []
3352 901a65c1 Iustin Pop
3353 901a65c1 Iustin Pop
    # mirror node verification
3354 a1f445d3 Iustin Pop
    if self.op.disk_template in constants.DTS_NET_MIRROR:
3355 a8083063 Iustin Pop
      if getattr(self.op, "snode", None) is None:
3356 a1f445d3 Iustin Pop
        raise errors.OpPrereqError("The networked disk templates need"
3357 3ecf6786 Iustin Pop
                                   " a mirror node")
3358 a8083063 Iustin Pop
3359 a8083063 Iustin Pop
      snode_name = self.cfg.ExpandNodeName(self.op.snode)
3360 a8083063 Iustin Pop
      if snode_name is None:
3361 3ecf6786 Iustin Pop
        raise errors.OpPrereqError("Unknown secondary node '%s'" %
3362 3ecf6786 Iustin Pop
                                   self.op.snode)
3363 a8083063 Iustin Pop
      elif snode_name == pnode.name:
3364 3ecf6786 Iustin Pop
        raise errors.OpPrereqError("The secondary node cannot be"
3365 3ecf6786 Iustin Pop
                                   " the primary node.")
3366 a8083063 Iustin Pop
      self.secondaries.append(snode_name)
3367 a8083063 Iustin Pop
3368 e2fe6369 Iustin Pop
    req_size = _ComputeDiskSize(self.op.disk_template,
3369 e2fe6369 Iustin Pop
                                self.op.disk_size, self.op.swap_size)
3370 ed1ebc60 Guido Trotter
3371 8d75db10 Iustin Pop
    # Check lv size requirements
3372 8d75db10 Iustin Pop
    if req_size is not None:
3373 8d75db10 Iustin Pop
      nodenames = [pnode.name] + self.secondaries
3374 8d75db10 Iustin Pop
      nodeinfo = rpc.call_node_info(nodenames, self.cfg.GetVGName())
3375 8d75db10 Iustin Pop
      for node in nodenames:
3376 8d75db10 Iustin Pop
        info = nodeinfo.get(node, None)
3377 8d75db10 Iustin Pop
        if not info:
3378 8d75db10 Iustin Pop
          raise errors.OpPrereqError("Cannot get current information"
3379 8d75db10 Iustin Pop
                                     " from node '%s'" % nodeinfo)
3380 8d75db10 Iustin Pop
        vg_free = info.get('vg_free', None)
3381 8d75db10 Iustin Pop
        if not isinstance(vg_free, int):
3382 8d75db10 Iustin Pop
          raise errors.OpPrereqError("Can't compute free disk space on"
3383 8d75db10 Iustin Pop
                                     " node %s" % node)
3384 8d75db10 Iustin Pop
        if req_size > info['vg_free']:
3385 8d75db10 Iustin Pop
          raise errors.OpPrereqError("Not enough disk space on target node %s."
3386 8d75db10 Iustin Pop
                                     " %d MB available, %d MB required" %
3387 8d75db10 Iustin Pop
                                     (node, info['vg_free'], req_size))
3388 ed1ebc60 Guido Trotter
3389 a8083063 Iustin Pop
    # os verification
3390 00fe9e38 Guido Trotter
    os_obj = rpc.call_os_get(pnode.name, self.op.os_type)
3391 dfa96ded Guido Trotter
    if not os_obj:
3392 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("OS '%s' not in supported os list for"
3393 3ecf6786 Iustin Pop
                                 " primary node"  % self.op.os_type)
3394 a8083063 Iustin Pop
3395 3b6d8c9b Iustin Pop
    if self.op.kernel_path == constants.VALUE_NONE:
3396 3b6d8c9b Iustin Pop
      raise errors.OpPrereqError("Can't set instance kernel to none")
3397 3b6d8c9b Iustin Pop
3398 a8083063 Iustin Pop
3399 901a65c1 Iustin Pop
    # bridge check on primary node
3400 a8083063 Iustin Pop
    if not rpc.call_bridges_exist(self.pnode.name, [self.op.bridge]):
3401 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("target bridge '%s' does not exist on"
3402 3ecf6786 Iustin Pop
                                 " destination node '%s'" %
3403 3ecf6786 Iustin Pop
                                 (self.op.bridge, pnode.name))
3404 a8083063 Iustin Pop
3405 a8083063 Iustin Pop
    if self.op.start:
3406 a8083063 Iustin Pop
      self.instance_status = 'up'
3407 a8083063 Iustin Pop
    else:
3408 a8083063 Iustin Pop
      self.instance_status = 'down'
3409 a8083063 Iustin Pop
3410 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
3411 a8083063 Iustin Pop
    """Create and add the instance to the cluster.
3412 a8083063 Iustin Pop

3413 a8083063 Iustin Pop
    """
3414 a8083063 Iustin Pop
    instance = self.op.instance_name
3415 a8083063 Iustin Pop
    pnode_name = self.pnode.name
3416 a8083063 Iustin Pop
3417 1862d460 Alexander Schreiber
    if self.op.mac == "auto":
3418 ba4b62cf Iustin Pop
      mac_address = self.cfg.GenerateMAC()
3419 1862d460 Alexander Schreiber
    else:
3420 ba4b62cf Iustin Pop
      mac_address = self.op.mac
3421 1862d460 Alexander Schreiber
3422 1862d460 Alexander Schreiber
    nic = objects.NIC(bridge=self.op.bridge, mac=mac_address)
3423 a8083063 Iustin Pop
    if self.inst_ip is not None:
3424 a8083063 Iustin Pop
      nic.ip = self.inst_ip
3425 a8083063 Iustin Pop
3426 2a6469d5 Alexander Schreiber
    ht_kind = self.sstore.GetHypervisorType()
3427 2a6469d5 Alexander Schreiber
    if ht_kind in constants.HTS_REQ_PORT:
3428 2a6469d5 Alexander Schreiber
      network_port = self.cfg.AllocatePort()
3429 2a6469d5 Alexander Schreiber
    else:
3430 2a6469d5 Alexander Schreiber
      network_port = None
3431 58acb49d Alexander Schreiber
3432 2c313123 Manuel Franceschini
    # this is needed because os.path.join does not accept None arguments
3433 2c313123 Manuel Franceschini
    if self.op.file_storage_dir is None:
3434 2c313123 Manuel Franceschini
      string_file_storage_dir = ""
3435 2c313123 Manuel Franceschini
    else:
3436 2c313123 Manuel Franceschini
      string_file_storage_dir = self.op.file_storage_dir
3437 2c313123 Manuel Franceschini
3438 0f1a06e3 Manuel Franceschini
    # build the full file storage dir path
3439 0f1a06e3 Manuel Franceschini
    file_storage_dir = os.path.normpath(os.path.join(
3440 0f1a06e3 Manuel Franceschini
                                        self.sstore.GetFileStorageDir(),
3441 2c313123 Manuel Franceschini
                                        string_file_storage_dir, instance))
3442 0f1a06e3 Manuel Franceschini
3443 0f1a06e3 Manuel Franceschini
3444 923b1523 Iustin Pop
    disks = _GenerateDiskTemplate(self.cfg,
3445 a8083063 Iustin Pop
                                  self.op.disk_template,
3446 a8083063 Iustin Pop
                                  instance, pnode_name,
3447 a8083063 Iustin Pop
                                  self.secondaries, self.op.disk_size,
3448 0f1a06e3 Manuel Franceschini
                                  self.op.swap_size,
3449 0f1a06e3 Manuel Franceschini
                                  file_storage_dir,
3450 0f1a06e3 Manuel Franceschini
                                  self.op.file_driver)
3451 a8083063 Iustin Pop
3452 a8083063 Iustin Pop
    iobj = objects.Instance(name=instance, os=self.op.os_type,
3453 a8083063 Iustin Pop
                            primary_node=pnode_name,
3454 a8083063 Iustin Pop
                            memory=self.op.mem_size,
3455 a8083063 Iustin Pop
                            vcpus=self.op.vcpus,
3456 a8083063 Iustin Pop
                            nics=[nic], disks=disks,
3457 a8083063 Iustin Pop
                            disk_template=self.op.disk_template,
3458 a8083063 Iustin Pop
                            status=self.instance_status,
3459 58acb49d Alexander Schreiber
                            network_port=network_port,
3460 3b6d8c9b Iustin Pop
                            kernel_path=self.op.kernel_path,
3461 3b6d8c9b Iustin Pop
                            initrd_path=self.op.initrd_path,
3462 25c5878d Alexander Schreiber
                            hvm_boot_order=self.op.hvm_boot_order,
3463 a8083063 Iustin Pop
                            )
3464 a8083063 Iustin Pop
3465 a8083063 Iustin Pop
    feedback_fn("* creating instance disks...")
3466 a8083063 Iustin Pop
    if not _CreateDisks(self.cfg, iobj):
3467 a8083063 Iustin Pop
      _RemoveDisks(iobj, self.cfg)
3468 3ecf6786 Iustin Pop
      raise errors.OpExecError("Device creation failed, reverting...")
3469 a8083063 Iustin Pop
3470 a8083063 Iustin Pop
    feedback_fn("adding instance %s to cluster config" % instance)
3471 a8083063 Iustin Pop
3472 a8083063 Iustin Pop
    self.cfg.AddInstance(iobj)
3473 a8083063 Iustin Pop
3474 a8083063 Iustin Pop
    if self.op.wait_for_sync:
3475 5bfac263 Iustin Pop
      disk_abort = not _WaitForSync(self.cfg, iobj, self.proc)
3476 a1f445d3 Iustin Pop
    elif iobj.disk_template in constants.DTS_NET_MIRROR:
3477 a8083063 Iustin Pop
      # make sure the disks are not degraded (still sync-ing is ok)
3478 a8083063 Iustin Pop
      time.sleep(15)
3479 a8083063 Iustin Pop
      feedback_fn("* checking mirrors status")
3480 5bfac263 Iustin Pop
      disk_abort = not _WaitForSync(self.cfg, iobj, self.proc, oneshot=True)
3481 a8083063 Iustin Pop
    else:
3482 a8083063 Iustin Pop
      disk_abort = False
3483 a8083063 Iustin Pop
3484 a8083063 Iustin Pop
    if disk_abort:
3485 a8083063 Iustin Pop
      _RemoveDisks(iobj, self.cfg)
3486 a8083063 Iustin Pop
      self.cfg.RemoveInstance(iobj.name)
3487 3ecf6786 Iustin Pop
      raise errors.OpExecError("There are some degraded disks for"
3488 3ecf6786 Iustin Pop
                               " this instance")
3489 a8083063 Iustin Pop
3490 a8083063 Iustin Pop
    feedback_fn("creating os for instance %s on node %s" %
3491 a8083063 Iustin Pop
                (instance, pnode_name))
3492 a8083063 Iustin Pop
3493 a8083063 Iustin Pop
    if iobj.disk_template != constants.DT_DISKLESS:
3494 a8083063 Iustin Pop
      if self.op.mode == constants.INSTANCE_CREATE:
3495 a8083063 Iustin Pop
        feedback_fn("* running the instance OS create scripts...")
3496 a8083063 Iustin Pop
        if not rpc.call_instance_os_add(pnode_name, iobj, "sda", "sdb"):
3497 3ecf6786 Iustin Pop
          raise errors.OpExecError("could not add os for instance %s"
3498 3ecf6786 Iustin Pop
                                   " on node %s" %
3499 3ecf6786 Iustin Pop
                                   (instance, pnode_name))
3500 a8083063 Iustin Pop
3501 a8083063 Iustin Pop
      elif self.op.mode == constants.INSTANCE_IMPORT:
3502 a8083063 Iustin Pop
        feedback_fn("* running the instance OS import scripts...")
3503 a8083063 Iustin Pop
        src_node = self.op.src_node
3504 a8083063 Iustin Pop
        src_image = self.src_image
3505 a8083063 Iustin Pop
        if not rpc.call_instance_os_import(pnode_name, iobj, "sda", "sdb",
3506 a8083063 Iustin Pop
                                                src_node, src_image):
3507 3ecf6786 Iustin Pop
          raise errors.OpExecError("Could not import os for instance"
3508 3ecf6786 Iustin Pop
                                   " %s on node %s" %
3509 3ecf6786 Iustin Pop
                                   (instance, pnode_name))
3510 a8083063 Iustin Pop
      else:
3511 a8083063 Iustin Pop
        # also checked in the prereq part
3512 3ecf6786 Iustin Pop
        raise errors.ProgrammerError("Unknown OS initialization mode '%s'"
3513 3ecf6786 Iustin Pop
                                     % self.op.mode)
3514 a8083063 Iustin Pop
3515 a8083063 Iustin Pop
    if self.op.start:
3516 a8083063 Iustin Pop
      logger.Info("starting instance %s on node %s" % (instance, pnode_name))
3517 a8083063 Iustin Pop
      feedback_fn("* starting instance...")
3518 a8083063 Iustin Pop
      if not rpc.call_instance_start(pnode_name, iobj, None):
3519 3ecf6786 Iustin Pop
        raise errors.OpExecError("Could not start instance")
3520 a8083063 Iustin Pop
3521 a8083063 Iustin Pop
3522 a8083063 Iustin Pop
class LUConnectConsole(NoHooksLU):
3523 a8083063 Iustin Pop
  """Connect to an instance's console.
3524 a8083063 Iustin Pop

3525 a8083063 Iustin Pop
  This is somewhat special in that it returns the command line that
3526 a8083063 Iustin Pop
  you need to run on the master node in order to connect to the
3527 a8083063 Iustin Pop
  console.
3528 a8083063 Iustin Pop

3529 a8083063 Iustin Pop
  """
3530 a8083063 Iustin Pop
  _OP_REQP = ["instance_name"]
3531 a8083063 Iustin Pop
3532 a8083063 Iustin Pop
  def CheckPrereq(self):
3533 a8083063 Iustin Pop
    """Check prerequisites.
3534 a8083063 Iustin Pop

3535 a8083063 Iustin Pop
    This checks that the instance is in the cluster.
3536 a8083063 Iustin Pop

3537 a8083063 Iustin Pop
    """
3538 a8083063 Iustin Pop
    instance = self.cfg.GetInstanceInfo(
3539 a8083063 Iustin Pop
      self.cfg.ExpandInstanceName(self.op.instance_name))
3540 a8083063 Iustin Pop
    if instance is None:
3541 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("Instance '%s' not known" %
3542 3ecf6786 Iustin Pop
                                 self.op.instance_name)
3543 a8083063 Iustin Pop
    self.instance = instance
3544 a8083063 Iustin Pop
3545 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
3546 a8083063 Iustin Pop
    """Connect to the console of an instance
3547 a8083063 Iustin Pop

3548 a8083063 Iustin Pop
    """
3549 a8083063 Iustin Pop
    instance = self.instance
3550 a8083063 Iustin Pop
    node = instance.primary_node
3551 a8083063 Iustin Pop
3552 a8083063 Iustin Pop
    node_insts = rpc.call_instance_list([node])[node]
3553 a8083063 Iustin Pop
    if node_insts is False:
3554 3ecf6786 Iustin Pop
      raise errors.OpExecError("Can't connect to node %s." % node)
3555 a8083063 Iustin Pop
3556 a8083063 Iustin Pop
    if instance.name not in node_insts:
3557 3ecf6786 Iustin Pop
      raise errors.OpExecError("Instance %s is not running." % instance.name)
3558 a8083063 Iustin Pop
3559 a8083063 Iustin Pop
    logger.Debug("connecting to console of %s on %s" % (instance.name, node))
3560 a8083063 Iustin Pop
3561 a8083063 Iustin Pop
    hyper = hypervisor.GetHypervisor()
3562 30989e69 Alexander Schreiber
    console_cmd = hyper.GetShellCommandForConsole(instance)
3563 b047857b Michael Hanselmann
3564 82122173 Iustin Pop
    # build ssh cmdline
3565 0a80a26f Michael Hanselmann
    return self.ssh.BuildCmd(node, "root", console_cmd, batch=True, tty=True)
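    # Illustrative note (not part of the original file): the returned value is
    # the complete command line to run on the master node, e.g. (assuming a
    # Xen hypervisor and default ssh settings) something along the lines of
    #   ssh -t root@node1.example.com 'xm console instance1.example.com'
    # The actual console command and ssh flags come from the hypervisor class
    # and from ssh.BuildCmd respectively.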
3566 a8083063 Iustin Pop
3567 a8083063 Iustin Pop
3568 a8083063 Iustin Pop
class LUReplaceDisks(LogicalUnit):
3569 a8083063 Iustin Pop
  """Replace the disks of an instance.
3570 a8083063 Iustin Pop

3571 a8083063 Iustin Pop
  """
3572 a8083063 Iustin Pop
  HPATH = "mirrors-replace"
3573 a8083063 Iustin Pop
  HTYPE = constants.HTYPE_INSTANCE
3574 a9e0c397 Iustin Pop
  _OP_REQP = ["instance_name", "mode", "disks"]
3575 a8083063 Iustin Pop
3576 b6e82a65 Iustin Pop
  def _RunAllocator(self):
3577 b6e82a65 Iustin Pop
    """Compute a new secondary node using an IAllocator.
3578 b6e82a65 Iustin Pop

3579 b6e82a65 Iustin Pop
    """
3580 b6e82a65 Iustin Pop
    ial = IAllocator(self.cfg, self.sstore,
3581 b6e82a65 Iustin Pop
                     mode=constants.IALLOCATOR_MODE_RELOC,
3582 b6e82a65 Iustin Pop
                     name=self.op.instance_name,
3583 b6e82a65 Iustin Pop
                     relocate_from=[self.sec_node])
3584 b6e82a65 Iustin Pop
3585 b6e82a65 Iustin Pop
    ial.Run(self.op.iallocator)
3586 b6e82a65 Iustin Pop
3587 b6e82a65 Iustin Pop
    if not ial.success:
3588 b6e82a65 Iustin Pop
      raise errors.OpPrereqError("Can't compute nodes using"
3589 b6e82a65 Iustin Pop
                                 " iallocator '%s': %s" % (self.op.iallocator,
3590 b6e82a65 Iustin Pop
                                                           ial.info))
3591 b6e82a65 Iustin Pop
    if len(ial.nodes) != ial.required_nodes:
3592 b6e82a65 Iustin Pop
      raise errors.OpPrereqError("iallocator '%s' returned invalid number"
3593 b6e82a65 Iustin Pop
                                 " of nodes (%s), required %s" %
3594 b6e82a65 Iustin Pop
                                 (self.op.iallocator, len(ial.nodes),
                                  ial.required_nodes))
3595 b6e82a65 Iustin Pop
    self.op.remote_node = ial.nodes[0]
3596 b6e82a65 Iustin Pop
    logger.ToStdout("Selected new secondary for the instance: %s" %
3597 b6e82a65 Iustin Pop
                    self.op.remote_node)
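    # Illustrative note (not part of the original file): in relocation mode
    # the allocator is asked for a single replacement for the current
    # secondary (self.sec_node), so ial.nodes would be e.g.
    # ["node3.example.com"] and that node becomes self.op.remote_node for the
    # disk replacement below.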
3598 b6e82a65 Iustin Pop
3599 a8083063 Iustin Pop
  def BuildHooksEnv(self):
3600 a8083063 Iustin Pop
    """Build hooks env.
3601 a8083063 Iustin Pop

3602 a8083063 Iustin Pop
    This runs on the master, the primary and all the secondaries.
3603 a8083063 Iustin Pop

3604 a8083063 Iustin Pop
    """
3605 a8083063 Iustin Pop
    env = {
3606 a9e0c397 Iustin Pop
      "MODE": self.op.mode,
3607 a8083063 Iustin Pop
      "NEW_SECONDARY": self.op.remote_node,
3608 a8083063 Iustin Pop
      "OLD_SECONDARY": self.instance.secondary_nodes[0],
3609 a8083063 Iustin Pop
      }
3610 396e1b78 Michael Hanselmann
    env.update(_BuildInstanceHookEnvByObject(self.instance))
3611 0834c866 Iustin Pop
    nl = [
3612 0834c866 Iustin Pop
      self.sstore.GetMasterNode(),
3613 0834c866 Iustin Pop
      self.instance.primary_node,
3614 0834c866 Iustin Pop
      ]
3615 0834c866 Iustin Pop
    if self.op.remote_node is not None:
3616 0834c866 Iustin Pop
      nl.append(self.op.remote_node)
3617 a8083063 Iustin Pop
    return env, nl, nl
3618 a8083063 Iustin Pop
3619 a8083063 Iustin Pop
  def CheckPrereq(self):
3620 a8083063 Iustin Pop
    """Check prerequisites.
3621 a8083063 Iustin Pop

3622 a8083063 Iustin Pop
    This checks that the instance is in the cluster.
3623 a8083063 Iustin Pop

3624 a8083063 Iustin Pop
    """
3625 b6e82a65 Iustin Pop
    if not hasattr(self.op, "remote_node"):
3626 b6e82a65 Iustin Pop
      self.op.remote_node = None
3627 b6e82a65 Iustin Pop
3628 a8083063 Iustin Pop
    instance = self.cfg.GetInstanceInfo(
3629 a8083063 Iustin Pop
      self.cfg.ExpandInstanceName(self.op.instance_name))
3630 a8083063 Iustin Pop
    if instance is None:
3631 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("Instance '%s' not known" %
3632 3ecf6786 Iustin Pop
                                 self.op.instance_name)
3633 a8083063 Iustin Pop
    self.instance = instance
3634 7df43a76 Iustin Pop
    self.op.instance_name = instance.name
3635 a8083063 Iustin Pop
3636 a9e0c397 Iustin Pop
    if instance.disk_template not in constants.DTS_NET_MIRROR:
3637 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("Instance's disk layout is not"
3638 a9e0c397 Iustin Pop
                                 " network mirrored.")
3639 a8083063 Iustin Pop
3640 a8083063 Iustin Pop
    if len(instance.secondary_nodes) != 1:
3641 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("The instance has a strange layout,"
3642 3ecf6786 Iustin Pop
                                 " expected one secondary but found %d" %
3643 3ecf6786 Iustin Pop
                                 len(instance.secondary_nodes))
3644 a8083063 Iustin Pop
3645 a9e0c397 Iustin Pop
    self.sec_node = instance.secondary_nodes[0]
3646 a9e0c397 Iustin Pop
3647 b6e82a65 Iustin Pop
    ia_name = getattr(self.op, "iallocator", None)
3648 b6e82a65 Iustin Pop
    if ia_name is not None:
3649 b6e82a65 Iustin Pop
      if self.op.remote_node is not None:
3650 b6e82a65 Iustin Pop
        raise errors.OpPrereqError("Give either the iallocator or the new"
3651 b6e82a65 Iustin Pop
                                   " secondary, not both")
3652 b6e82a65 Iustin Pop
      self.op.remote_node = self._RunAllocator()
3653 b6e82a65 Iustin Pop
3654 b6e82a65 Iustin Pop
    remote_node = self.op.remote_node
3655 a9e0c397 Iustin Pop
    if remote_node is not None:
3656 a8083063 Iustin Pop
      remote_node = self.cfg.ExpandNodeName(remote_node)
3657 a8083063 Iustin Pop
      if remote_node is None:
3658 3ecf6786 Iustin Pop
        raise errors.OpPrereqError("Node '%s' not known" %
3659 3ecf6786 Iustin Pop
                                   self.op.remote_node)
3660 a9e0c397 Iustin Pop
      self.remote_node_info = self.cfg.GetNodeInfo(remote_node)
3661 a9e0c397 Iustin Pop
    else:
3662 a9e0c397 Iustin Pop
      self.remote_node_info = None
3663 a8083063 Iustin Pop
    if remote_node == instance.primary_node:
3664 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("The specified node is the primary node of"
3665 3ecf6786 Iustin Pop
                                 " the instance.")
3666 a9e0c397 Iustin Pop
    elif remote_node == self.sec_node:
3667 0834c866 Iustin Pop
      if self.op.mode == constants.REPLACE_DISK_SEC:
3668 0834c866 Iustin Pop
        # this is for DRBD8, where we can't execute the same mode of
3669 0834c866 Iustin Pop
        # replacement as for drbd7 (no different port allocated)
3670 0834c866 Iustin Pop
        raise errors.OpPrereqError("Same secondary given, cannot execute"
3671 0834c866 Iustin Pop
                                   " replacement")
3672 a9e0c397 Iustin Pop
      # the user gave the current secondary, switch to
3673 0834c866 Iustin Pop
      # 'no-replace-secondary' mode for drbd7
3674 a9e0c397 Iustin Pop
      remote_node = None
3675 a9e0c397 Iustin Pop
    if (instance.disk_template == constants.DT_REMOTE_RAID1 and
3676 a9e0c397 Iustin Pop
        self.op.mode != constants.REPLACE_DISK_ALL):
3677 a9e0c397 Iustin Pop
      raise errors.OpPrereqError("Template 'remote_raid1' only allows all"
3678 a9e0c397 Iustin Pop
                                 " disks replacement, not individual ones")
3679 a9e0c397 Iustin Pop
    if instance.disk_template == constants.DT_DRBD8:
3680 7df43a76 Iustin Pop
      if (self.op.mode == constants.REPLACE_DISK_ALL and
3681 7df43a76 Iustin Pop
          remote_node is not None):
3682 7df43a76 Iustin Pop
        # switch to replace secondary mode
3683 7df43a76 Iustin Pop
        self.op.mode = constants.REPLACE_DISK_SEC
3684 7df43a76 Iustin Pop
3685 a9e0c397 Iustin Pop
      if self.op.mode == constants.REPLACE_DISK_ALL:
3686 12c3449a Michael Hanselmann
        raise errors.OpPrereqError("Template 'drbd' only allows primary or"
3687 a9e0c397 Iustin Pop
                                   " secondary disk replacement, not"
3688 a9e0c397 Iustin Pop
                                   " both at once")
3689 a9e0c397 Iustin Pop
      elif self.op.mode == constants.REPLACE_DISK_PRI:
3690 a9e0c397 Iustin Pop
        if remote_node is not None:
3691 12c3449a Michael Hanselmann
          raise errors.OpPrereqError("Template 'drbd' does not allow changing"
3692 a9e0c397 Iustin Pop
                                     " the secondary while doing a primary"
3693 a9e0c397 Iustin Pop
                                     " node disk replacement")
3694 a9e0c397 Iustin Pop
        self.tgt_node = instance.primary_node
3695 cff90b79 Iustin Pop
        self.oth_node = instance.secondary_nodes[0]
3696 a9e0c397 Iustin Pop
      elif self.op.mode == constants.REPLACE_DISK_SEC:
3697 a9e0c397 Iustin Pop
        self.new_node = remote_node # this can be None, in which case
3698 a9e0c397 Iustin Pop
                                    # we don't change the secondary
3699 a9e0c397 Iustin Pop
        self.tgt_node = instance.secondary_nodes[0]
3700 cff90b79 Iustin Pop
        self.oth_node = instance.primary_node
3701 a9e0c397 Iustin Pop
      else:
3702 a9e0c397 Iustin Pop
        raise errors.ProgrammerError("Unhandled disk replace mode")
3703 a9e0c397 Iustin Pop
3704 a9e0c397 Iustin Pop
    for name in self.op.disks:
3705 a9e0c397 Iustin Pop
      if instance.FindDisk(name) is None:
3706 a9e0c397 Iustin Pop
        raise errors.OpPrereqError("Disk '%s' not found for instance '%s'" %
3707 a9e0c397 Iustin Pop
                                   (name, instance.name))
3708 a8083063 Iustin Pop
    self.op.remote_node = remote_node
3709 a8083063 Iustin Pop
3710 a9e0c397 Iustin Pop
  def _ExecRR1(self, feedback_fn):
3711 a8083063 Iustin Pop
    """Replace the disks of an instance.
3712 a8083063 Iustin Pop

3713 a8083063 Iustin Pop
    """
3714 a8083063 Iustin Pop
    instance = self.instance
3715 a8083063 Iustin Pop
    iv_names = {}
3716 a8083063 Iustin Pop
    # start of work
3717 a9e0c397 Iustin Pop
    if self.op.remote_node is None:
3718 a9e0c397 Iustin Pop
      remote_node = self.sec_node
3719 a9e0c397 Iustin Pop
    else:
3720 a9e0c397 Iustin Pop
      remote_node = self.op.remote_node
3721 a8083063 Iustin Pop
    cfg = self.cfg
3722 a8083063 Iustin Pop
    for dev in instance.disks:
3723 a8083063 Iustin Pop
      size = dev.size
3724 923b1523 Iustin Pop
      lv_names = [".%s_%s" % (dev.iv_name, suf) for suf in ["data", "meta"]]
3725 923b1523 Iustin Pop
      names = _GenerateUniqueNames(cfg, lv_names)
3726 923b1523 Iustin Pop
      new_drbd = _GenerateMDDRBDBranch(cfg, instance.primary_node,
3727 923b1523 Iustin Pop
                                       remote_node, size, names)
3728 a8083063 Iustin Pop
      iv_names[dev.iv_name] = (dev, dev.children[0], new_drbd)
3729 a8083063 Iustin Pop
      logger.Info("adding new mirror component on secondary for %s" %
3730 a8083063 Iustin Pop
                  dev.iv_name)
3731 a8083063 Iustin Pop
      #HARDCODE
3732 3f78eef2 Iustin Pop
      if not _CreateBlockDevOnSecondary(cfg, remote_node, instance,
3733 3f78eef2 Iustin Pop
                                        new_drbd, False,
3734 a0c3fea1 Michael Hanselmann
                                        _GetInstanceInfoText(instance)):
3735 f4bc1f2c Michael Hanselmann
        raise errors.OpExecError("Failed to create new component on secondary"
3736 f4bc1f2c Michael Hanselmann
                                 " node %s. Full abort, cleanup manually!" %
3737 3ecf6786 Iustin Pop
                                 remote_node)
3738 a8083063 Iustin Pop
3739 a8083063 Iustin Pop
      logger.Info("adding new mirror component on primary")
3740 a8083063 Iustin Pop
      #HARDCODE
3741 3f78eef2 Iustin Pop
      if not _CreateBlockDevOnPrimary(cfg, instance.primary_node,
3742 3f78eef2 Iustin Pop
                                      instance, new_drbd,
3743 a0c3fea1 Michael Hanselmann
                                      _GetInstanceInfoText(instance)):
3744 a8083063 Iustin Pop
        # remove secondary dev
3745 a8083063 Iustin Pop
        cfg.SetDiskID(new_drbd, remote_node)
3746 a8083063 Iustin Pop
        rpc.call_blockdev_remove(remote_node, new_drbd)
3747 f4bc1f2c Michael Hanselmann
        raise errors.OpExecError("Failed to create volume on primary!"
3748 f4bc1f2c Michael Hanselmann
                                 " Full abort, cleanup manually!!")
3749 a8083063 Iustin Pop
3750 a8083063 Iustin Pop
      # the device exists now
3751 a8083063 Iustin Pop
      # call the primary node to add the mirror to md
3752 a8083063 Iustin Pop
      logger.Info("adding new mirror component to md")
3753 153d9724 Iustin Pop
      if not rpc.call_blockdev_addchildren(instance.primary_node, dev,
3754 153d9724 Iustin Pop
                                           [new_drbd]):
3755 a8083063 Iustin Pop
        logger.Error("Can't add mirror compoment to md!")
3756 a8083063 Iustin Pop
        cfg.SetDiskID(new_drbd, remote_node)
3757 a8083063 Iustin Pop
        if not rpc.call_blockdev_remove(remote_node, new_drbd):
3758 a8083063 Iustin Pop
          logger.Error("Can't rollback on secondary")
3759 a8083063 Iustin Pop
        cfg.SetDiskID(new_drbd, instance.primary_node)
3760 a8083063 Iustin Pop
        if not rpc.call_blockdev_remove(instance.primary_node, new_drbd):
3761 a8083063 Iustin Pop
          logger.Error("Can't rollback on primary")
3762 3ecf6786 Iustin Pop
        raise errors.OpExecError("Full abort, cleanup manually!!")
3763 a8083063 Iustin Pop
3764 a8083063 Iustin Pop
      dev.children.append(new_drbd)
3765 a8083063 Iustin Pop
      cfg.AddInstance(instance)
3766 a8083063 Iustin Pop
3767 a8083063 Iustin Pop
    # this can fail as the old devices are degraded and _WaitForSync
3768 a8083063 Iustin Pop
    # does a combined result over all disks, so we don't check its
3769 a8083063 Iustin Pop
    # return value
3770 5bfac263 Iustin Pop
    _WaitForSync(cfg, instance, self.proc, unlock=True)
3771 a8083063 Iustin Pop
3772 a8083063 Iustin Pop
    # so check manually all the devices
3773 a8083063 Iustin Pop
    for name in iv_names:
3774 a8083063 Iustin Pop
      dev, child, new_drbd = iv_names[name]
3775 a8083063 Iustin Pop
      cfg.SetDiskID(dev, instance.primary_node)
3776 a8083063 Iustin Pop
      is_degr = rpc.call_blockdev_find(instance.primary_node, dev)[5]
3777 a8083063 Iustin Pop
      if is_degr:
3778 3ecf6786 Iustin Pop
        raise errors.OpExecError("MD device %s is degraded!" % name)
3779 a8083063 Iustin Pop
      cfg.SetDiskID(new_drbd, instance.primary_node)
3780 a8083063 Iustin Pop
      is_degr = rpc.call_blockdev_find(instance.primary_node, new_drbd)[5]
3781 a8083063 Iustin Pop
      if is_degr:
3782 3ecf6786 Iustin Pop
        raise errors.OpExecError("New drbd device %s is degraded!" % name)
3783 a8083063 Iustin Pop
3784 a8083063 Iustin Pop
    for name in iv_names:
3785 a8083063 Iustin Pop
      dev, child, new_drbd = iv_names[name]
3786 a8083063 Iustin Pop
      logger.Info("remove mirror %s component" % name)
3787 a8083063 Iustin Pop
      cfg.SetDiskID(dev, instance.primary_node)
3788 153d9724 Iustin Pop
      if not rpc.call_blockdev_removechildren(instance.primary_node,
3789 153d9724 Iustin Pop
                                              dev, [child]):
3790 a8083063 Iustin Pop
        logger.Error("Can't remove child from mirror, aborting"
3791 a8083063 Iustin Pop
                     " *this device cleanup*.\nYou need to cleanup manually!!")
3792 a8083063 Iustin Pop
        continue
3793 a8083063 Iustin Pop
3794 a8083063 Iustin Pop
      for node in child.logical_id[:2]:
3795 a8083063 Iustin Pop
        logger.Info("remove child device on %s" % node)
3796 a8083063 Iustin Pop
        cfg.SetDiskID(child, node)
3797 a8083063 Iustin Pop
        if not rpc.call_blockdev_remove(node, child):
3798 a8083063 Iustin Pop
          logger.Error("Warning: failed to remove device from node %s,"
3799 a8083063 Iustin Pop
                       " continuing operation." % node)
3800 a8083063 Iustin Pop
3801 a8083063 Iustin Pop
      dev.children.remove(child)
3802 a8083063 Iustin Pop
3803 a8083063 Iustin Pop
      cfg.AddInstance(instance)
3804 a8083063 Iustin Pop
3805 a9e0c397 Iustin Pop
  def _ExecD8DiskOnly(self, feedback_fn):
3806 a9e0c397 Iustin Pop
    """Replace a disk on the primary or secondary for dbrd8.
3807 a9e0c397 Iustin Pop

3808 a9e0c397 Iustin Pop
    The algorithm for replace is quite complicated:
3809 a9e0c397 Iustin Pop
      - for each disk to be replaced:
3810 a9e0c397 Iustin Pop
        - create new LVs on the target node with unique names
3811 a9e0c397 Iustin Pop
        - detach old LVs from the drbd device
3812 a9e0c397 Iustin Pop
        - rename old LVs to name_replaced.<time_t>
3813 a9e0c397 Iustin Pop
        - rename new LVs to old LVs
3814 a9e0c397 Iustin Pop
        - attach the new LVs (with the old names now) to the drbd device
3815 a9e0c397 Iustin Pop
      - wait for sync across all devices
3816 a9e0c397 Iustin Pop
      - for each modified disk:
3817 a9e0c397 Iustin Pop
        - remove old LVs (which have the name name_replaced.<time_t>)
3818 a9e0c397 Iustin Pop

3819 a9e0c397 Iustin Pop
    Failures are not very well handled.
3820 cff90b79 Iustin Pop

3821 a9e0c397 Iustin Pop
    """
3822 cff90b79 Iustin Pop
    steps_total = 6
3823 5bfac263 Iustin Pop
    warning, info = (self.proc.LogWarning, self.proc.LogInfo)
3824 a9e0c397 Iustin Pop
    instance = self.instance
3825 a9e0c397 Iustin Pop
    iv_names = {}
3826 a9e0c397 Iustin Pop
    vgname = self.cfg.GetVGName()
3827 a9e0c397 Iustin Pop
    # start of work
3828 a9e0c397 Iustin Pop
    cfg = self.cfg
3829 a9e0c397 Iustin Pop
    tgt_node = self.tgt_node
3830 cff90b79 Iustin Pop
    oth_node = self.oth_node
3831 cff90b79 Iustin Pop
3832 cff90b79 Iustin Pop
    # Step: check device activation
3833 5bfac263 Iustin Pop
    self.proc.LogStep(1, steps_total, "check device existence")
3834 cff90b79 Iustin Pop
    info("checking volume groups")
3835 cff90b79 Iustin Pop
    my_vg = cfg.GetVGName()
3836 cff90b79 Iustin Pop
    results = rpc.call_vg_list([oth_node, tgt_node])
3837 cff90b79 Iustin Pop
    if not results:
3838 cff90b79 Iustin Pop
      raise errors.OpExecError("Can't list volume groups on the nodes")
3839 cff90b79 Iustin Pop
    for node in oth_node, tgt_node:
3840 cff90b79 Iustin Pop
      res = results.get(node, False)
3841 cff90b79 Iustin Pop
      if not res or my_vg not in res:
3842 cff90b79 Iustin Pop
        raise errors.OpExecError("Volume group '%s' not found on %s" %
3843 cff90b79 Iustin Pop
                                 (my_vg, node))
3844 cff90b79 Iustin Pop
    for dev in instance.disks:
3845 cff90b79 Iustin Pop
      if not dev.iv_name in self.op.disks:
3846 cff90b79 Iustin Pop
        continue
3847 cff90b79 Iustin Pop
      for node in tgt_node, oth_node:
3848 cff90b79 Iustin Pop
        info("checking %s on %s" % (dev.iv_name, node))
3849 cff90b79 Iustin Pop
        cfg.SetDiskID(dev, node)
3850 cff90b79 Iustin Pop
        if not rpc.call_blockdev_find(node, dev):
3851 cff90b79 Iustin Pop
          raise errors.OpExecError("Can't find device %s on node %s" %
3852 cff90b79 Iustin Pop
                                   (dev.iv_name, node))
3853 cff90b79 Iustin Pop
3854 cff90b79 Iustin Pop
    # Step: check other node consistency
3855 5bfac263 Iustin Pop
    self.proc.LogStep(2, steps_total, "check peer consistency")
3856 cff90b79 Iustin Pop
    for dev in instance.disks:
3857 cff90b79 Iustin Pop
      if not dev.iv_name in self.op.disks:
3858 cff90b79 Iustin Pop
        continue
3859 cff90b79 Iustin Pop
      info("checking %s consistency on %s" % (dev.iv_name, oth_node))
3860 cff90b79 Iustin Pop
      if not _CheckDiskConsistency(self.cfg, dev, oth_node,
3861 cff90b79 Iustin Pop
                                   oth_node==instance.primary_node):
3862 cff90b79 Iustin Pop
        raise errors.OpExecError("Peer node (%s) has degraded storage, unsafe"
3863 cff90b79 Iustin Pop
                                 " to replace disks on this node (%s)" %
3864 cff90b79 Iustin Pop
                                 (oth_node, tgt_node))
3865 cff90b79 Iustin Pop
3866 cff90b79 Iustin Pop
    # Step: create new storage
3867 5bfac263 Iustin Pop
    self.proc.LogStep(3, steps_total, "allocate new storage")
3868 a9e0c397 Iustin Pop
    for dev in instance.disks:
3869 a9e0c397 Iustin Pop
      if not dev.iv_name in self.op.disks:
3870 a9e0c397 Iustin Pop
        continue
3871 a9e0c397 Iustin Pop
      size = dev.size
3872 a9e0c397 Iustin Pop
      cfg.SetDiskID(dev, tgt_node)
3873 a9e0c397 Iustin Pop
      lv_names = [".%s_%s" % (dev.iv_name, suf) for suf in ["data", "meta"]]
3874 a9e0c397 Iustin Pop
      names = _GenerateUniqueNames(cfg, lv_names)
3875 a9e0c397 Iustin Pop
      lv_data = objects.Disk(dev_type=constants.LD_LV, size=size,
3876 a9e0c397 Iustin Pop
                             logical_id=(vgname, names[0]))
3877 a9e0c397 Iustin Pop
      lv_meta = objects.Disk(dev_type=constants.LD_LV, size=128,
3878 a9e0c397 Iustin Pop
                             logical_id=(vgname, names[1]))
3879 a9e0c397 Iustin Pop
      new_lvs = [lv_data, lv_meta]
3880 a9e0c397 Iustin Pop
      old_lvs = dev.children
3881 a9e0c397 Iustin Pop
      iv_names[dev.iv_name] = (dev, old_lvs, new_lvs)
3882 cff90b79 Iustin Pop
      info("creating new local storage on %s for %s" %
3883 cff90b79 Iustin Pop
           (tgt_node, dev.iv_name))
3884 a9e0c397 Iustin Pop
      # since we *always* want to create this LV, we use the
3885 a9e0c397 Iustin Pop
      # _Create...OnPrimary (which forces the creation), even if we
3886 a9e0c397 Iustin Pop
      # are talking about the secondary node
3887 a9e0c397 Iustin Pop
      for new_lv in new_lvs:
3888 3f78eef2 Iustin Pop
        if not _CreateBlockDevOnPrimary(cfg, tgt_node, instance, new_lv,
3889 a9e0c397 Iustin Pop
                                        _GetInstanceInfoText(instance)):
3890 a9e0c397 Iustin Pop
          raise errors.OpExecError("Failed to create new LV named '%s' on"
3891 a9e0c397 Iustin Pop
                                   " node '%s'" %
3892 a9e0c397 Iustin Pop
                                   (new_lv.logical_id[1], tgt_node))
3893 a9e0c397 Iustin Pop
3894 cff90b79 Iustin Pop
    # Step: for each lv, detach+rename*2+attach
3895 5bfac263 Iustin Pop
    self.proc.LogStep(4, steps_total, "change drbd configuration")
3896 cff90b79 Iustin Pop
    for dev, old_lvs, new_lvs in iv_names.itervalues():
3897 cff90b79 Iustin Pop
      info("detaching %s drbd from local storage" % dev.iv_name)
3898 a9e0c397 Iustin Pop
      if not rpc.call_blockdev_removechildren(tgt_node, dev, old_lvs):
3899 a9e0c397 Iustin Pop
        raise errors.OpExecError("Can't detach drbd from local storage on node"
3900 a9e0c397 Iustin Pop
                                 " %s for device %s" % (tgt_node, dev.iv_name))
3901 cff90b79 Iustin Pop
      #dev.children = []
3902 cff90b79 Iustin Pop
      #cfg.Update(instance)
3903 a9e0c397 Iustin Pop
3904 a9e0c397 Iustin Pop
      # ok, we created the new LVs, so now we know we have the needed
3905 a9e0c397 Iustin Pop
      # storage; as such, we proceed on the target node to rename
3906 a9e0c397 Iustin Pop
      # old_lv to _old, and new_lv to old_lv; note that we rename LVs
3907 c99a3cc0 Manuel Franceschini
      # using the assumption that logical_id == physical_id (which in
3908 a9e0c397 Iustin Pop
      # turn is the unique_id on that node)
3909 cff90b79 Iustin Pop
3910 cff90b79 Iustin Pop
      # FIXME(iustin): use a better name for the replaced LVs
3911 a9e0c397 Iustin Pop
      temp_suffix = int(time.time())
3912 a9e0c397 Iustin Pop
      ren_fn = lambda d, suff: (d.physical_id[0],
3913 a9e0c397 Iustin Pop
                                d.physical_id[1] + "_replaced-%s" % suff)
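      # illustrative example (hypothetical names): for an old LV with
      # physical_id ("xenvg", "sda_data") and temp_suffix 1199145600,
      # ren_fn yields ("xenvg", "sda_data_replaced-1199145600")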
3914 cff90b79 Iustin Pop
      # build the rename list based on what LVs exist on the node
3915 cff90b79 Iustin Pop
      rlist = []
3916 cff90b79 Iustin Pop
      for to_ren in old_lvs:
3917 cff90b79 Iustin Pop
        find_res = rpc.call_blockdev_find(tgt_node, to_ren)
3918 cff90b79 Iustin Pop
        if find_res is not None: # device exists
3919 cff90b79 Iustin Pop
          rlist.append((to_ren, ren_fn(to_ren, temp_suffix)))
3920 cff90b79 Iustin Pop
3921 cff90b79 Iustin Pop
      info("renaming the old LVs on the target node")
3922 a9e0c397 Iustin Pop
      if not rpc.call_blockdev_rename(tgt_node, rlist):
3923 cff90b79 Iustin Pop
        raise errors.OpExecError("Can't rename old LVs on node %s" % tgt_node)
3924 a9e0c397 Iustin Pop
      # now we rename the new LVs to the old LVs
3925 cff90b79 Iustin Pop
      info("renaming the new LVs on the target node")
3926 a9e0c397 Iustin Pop
      rlist = [(new, old.physical_id) for old, new in zip(old_lvs, new_lvs)]
3927 a9e0c397 Iustin Pop
      if not rpc.call_blockdev_rename(tgt_node, rlist):
3928 cff90b79 Iustin Pop
        raise errors.OpExecError("Can't rename new LVs on node %s" % tgt_node)
3929 cff90b79 Iustin Pop
3930 cff90b79 Iustin Pop
      for old, new in zip(old_lvs, new_lvs):
3931 cff90b79 Iustin Pop
        new.logical_id = old.logical_id
3932 cff90b79 Iustin Pop
        cfg.SetDiskID(new, tgt_node)
3933 a9e0c397 Iustin Pop
3934 cff90b79 Iustin Pop
      for disk in old_lvs:
3935 cff90b79 Iustin Pop
        disk.logical_id = ren_fn(disk, temp_suffix)
3936 cff90b79 Iustin Pop
        cfg.SetDiskID(disk, tgt_node)
3937 a9e0c397 Iustin Pop
3938 a9e0c397 Iustin Pop
      # now that the new lvs have the old name, we can add them to the device
3939 cff90b79 Iustin Pop
      info("adding new mirror component on %s" % tgt_node)
3940 a9e0c397 Iustin Pop
      if not rpc.call_blockdev_addchildren(tgt_node, dev, new_lvs):
3941 a9e0c397 Iustin Pop
        for new_lv in new_lvs:
3942 a9e0c397 Iustin Pop
          if not rpc.call_blockdev_remove(tgt_node, new_lv):
3943 79caa9ed Guido Trotter
            warning("Can't rollback device %s", hint="manually cleanup unused"
3944 cff90b79 Iustin Pop
                    " logical volumes")
3945 cff90b79 Iustin Pop
        raise errors.OpExecError("Can't add local storage to drbd")
3946 a9e0c397 Iustin Pop
3947 a9e0c397 Iustin Pop
      dev.children = new_lvs
3948 a9e0c397 Iustin Pop
      cfg.Update(instance)
3949 a9e0c397 Iustin Pop
3950 cff90b79 Iustin Pop
    # Step: wait for sync
3951 a9e0c397 Iustin Pop
3952 a9e0c397 Iustin Pop
    # this can fail as the old devices are degraded and _WaitForSync
3953 a9e0c397 Iustin Pop
    # does a combined result over all disks, so we don't check its
3954 a9e0c397 Iustin Pop
    # return value
3955 5bfac263 Iustin Pop
    self.proc.LogStep(5, steps_total, "sync devices")
3956 5bfac263 Iustin Pop
    _WaitForSync(cfg, instance, self.proc, unlock=True)
3957 a9e0c397 Iustin Pop
3958 a9e0c397 Iustin Pop
    # so check manually all the devices
3959 a9e0c397 Iustin Pop
    for name, (dev, old_lvs, new_lvs) in iv_names.iteritems():
3960 a9e0c397 Iustin Pop
      cfg.SetDiskID(dev, instance.primary_node)
3961 a9e0c397 Iustin Pop
      is_degr = rpc.call_blockdev_find(instance.primary_node, dev)[5]
3962 a9e0c397 Iustin Pop
      if is_degr:
3963 a9e0c397 Iustin Pop
        raise errors.OpExecError("DRBD device %s is degraded!" % name)
3964 a9e0c397 Iustin Pop
3965 cff90b79 Iustin Pop
    # Step: remove old storage
3966 5bfac263 Iustin Pop
    self.proc.LogStep(6, steps_total, "removing old storage")
3967 a9e0c397 Iustin Pop
    for name, (dev, old_lvs, new_lvs) in iv_names.iteritems():
3968 cff90b79 Iustin Pop
      info("remove logical volumes for %s" % name)
3969 a9e0c397 Iustin Pop
      for lv in old_lvs:
3970 a9e0c397 Iustin Pop
        cfg.SetDiskID(lv, tgt_node)
3971 a9e0c397 Iustin Pop
        if not rpc.call_blockdev_remove(tgt_node, lv):
3972 79caa9ed Guido Trotter
          warning("Can't remove old LV", hint="manually remove unused LVs")
3973 a9e0c397 Iustin Pop
          continue
3974 a9e0c397 Iustin Pop
3975 a9e0c397 Iustin Pop
  def _ExecD8Secondary(self, feedback_fn):
3976 a9e0c397 Iustin Pop
    """Replace the secondary node for drbd8.
3977 a9e0c397 Iustin Pop

3978 a9e0c397 Iustin Pop
    The algorithm for replace is quite complicated:
3979 a9e0c397 Iustin Pop
      - for all disks of the instance:
3980 a9e0c397 Iustin Pop
        - create new LVs on the new node with same names
3981 a9e0c397 Iustin Pop
        - shutdown the drbd device on the old secondary
3982 a9e0c397 Iustin Pop
        - disconnect the drbd network on the primary
3983 a9e0c397 Iustin Pop
        - create the drbd device on the new secondary
3984 a9e0c397 Iustin Pop
        - network attach the drbd on the primary, using an artifice:
3985 a9e0c397 Iustin Pop
          the drbd code for Attach() will connect to the network if it
3986 a9e0c397 Iustin Pop
          finds a device which is connected to the correct local disks but
3987 a9e0c397 Iustin Pop
          not network enabled
3988 a9e0c397 Iustin Pop
      - wait for sync across all devices
3989 a9e0c397 Iustin Pop
      - remove all disks from the old secondary
3990 a9e0c397 Iustin Pop

3991 a9e0c397 Iustin Pop
    Failures are not very well handled.
3992 0834c866 Iustin Pop

3993 a9e0c397 Iustin Pop
    """
3994 0834c866 Iustin Pop
    steps_total = 6
3995 5bfac263 Iustin Pop
    warning, info = (self.proc.LogWarning, self.proc.LogInfo)
3996 a9e0c397 Iustin Pop
    instance = self.instance
3997 a9e0c397 Iustin Pop
    iv_names = {}
3998 a9e0c397 Iustin Pop
    vgname = self.cfg.GetVGName()
3999 a9e0c397 Iustin Pop
    # start of work
4000 a9e0c397 Iustin Pop
    cfg = self.cfg
4001 a9e0c397 Iustin Pop
    old_node = self.tgt_node
4002 a9e0c397 Iustin Pop
    new_node = self.new_node
4003 a9e0c397 Iustin Pop
    pri_node = instance.primary_node
4004 0834c866 Iustin Pop
4005 0834c866 Iustin Pop
    # Step: check device activation
4006 5bfac263 Iustin Pop
    self.proc.LogStep(1, steps_total, "check device existence")
4007 0834c866 Iustin Pop
    info("checking volume groups")
4008 0834c866 Iustin Pop
    my_vg = cfg.GetVGName()
4009 0834c866 Iustin Pop
    results = rpc.call_vg_list([pri_node, new_node])
4010 0834c866 Iustin Pop
    if not results:
4011 0834c866 Iustin Pop
      raise errors.OpExecError("Can't list volume groups on the nodes")
4012 0834c866 Iustin Pop
    for node in pri_node, new_node:
4013 0834c866 Iustin Pop
      res = results.get(node, False)
4014 0834c866 Iustin Pop
      if not res or my_vg not in res:
4015 0834c866 Iustin Pop
        raise errors.OpExecError("Volume group '%s' not found on %s" %
4016 0834c866 Iustin Pop
                                 (my_vg, node))
4017 0834c866 Iustin Pop
    for dev in instance.disks:
4018 0834c866 Iustin Pop
      if not dev.iv_name in self.op.disks:
4019 0834c866 Iustin Pop
        continue
4020 0834c866 Iustin Pop
      info("checking %s on %s" % (dev.iv_name, pri_node))
4021 0834c866 Iustin Pop
      cfg.SetDiskID(dev, pri_node)
4022 0834c866 Iustin Pop
      if not rpc.call_blockdev_find(pri_node, dev):
4023 0834c866 Iustin Pop
        raise errors.OpExecError("Can't find device %s on node %s" %
4024 0834c866 Iustin Pop
                                 (dev.iv_name, pri_node))
4025 0834c866 Iustin Pop
4026 0834c866 Iustin Pop
    # Step: check other node consistency
4027 5bfac263 Iustin Pop
    self.proc.LogStep(2, steps_total, "check peer consistency")
4028 0834c866 Iustin Pop
    for dev in instance.disks:
4029 0834c866 Iustin Pop
      if not dev.iv_name in self.op.disks:
4030 0834c866 Iustin Pop
        continue
4031 0834c866 Iustin Pop
      info("checking %s consistency on %s" % (dev.iv_name, pri_node))
4032 0834c866 Iustin Pop
      if not _CheckDiskConsistency(self.cfg, dev, pri_node, True, ldisk=True):
4033 0834c866 Iustin Pop
        raise errors.OpExecError("Primary node (%s) has degraded storage,"
4034 0834c866 Iustin Pop
                                 " unsafe to replace the secondary" %
4035 0834c866 Iustin Pop
                                 pri_node)
4036 0834c866 Iustin Pop
4037 0834c866 Iustin Pop
    # Step: create new storage
4038 5bfac263 Iustin Pop
    self.proc.LogStep(3, steps_total, "allocate new storage")
4039 a9e0c397 Iustin Pop
    for dev in instance.disks:
4040 a9e0c397 Iustin Pop
      size = dev.size
4041 0834c866 Iustin Pop
      info("adding new local storage on %s for %s" % (new_node, dev.iv_name))
4042 a9e0c397 Iustin Pop
      # since we *always* want to create this LV, we use the
4043 a9e0c397 Iustin Pop
      # _Create...OnPrimary (which forces the creation), even if we
4044 a9e0c397 Iustin Pop
      # are talking about the secondary node
4045 a9e0c397 Iustin Pop
      for new_lv in dev.children:
4046 3f78eef2 Iustin Pop
        if not _CreateBlockDevOnPrimary(cfg, new_node, instance, new_lv,
4047 a9e0c397 Iustin Pop
                                        _GetInstanceInfoText(instance)):
4048 a9e0c397 Iustin Pop
          raise errors.OpExecError("Failed to create new LV named '%s' on"
4049 a9e0c397 Iustin Pop
                                   " node '%s'" %
4050 a9e0c397 Iustin Pop
                                   (new_lv.logical_id[1], new_node))
4051 a9e0c397 Iustin Pop
4052 0834c866 Iustin Pop
      iv_names[dev.iv_name] = (dev, dev.children)
4053 0834c866 Iustin Pop
4054 5bfac263 Iustin Pop
    self.proc.LogStep(4, steps_total, "changing drbd configuration")
4055 0834c866 Iustin Pop
    for dev in instance.disks:
4056 0834c866 Iustin Pop
      size = dev.size
4057 0834c866 Iustin Pop
      info("activating a new drbd on %s for %s" % (new_node, dev.iv_name))
4058 a9e0c397 Iustin Pop
      # create new devices on new_node
4059 a9e0c397 Iustin Pop
      new_drbd = objects.Disk(dev_type=constants.LD_DRBD8,
4060 a9e0c397 Iustin Pop
                              logical_id=(pri_node, new_node,
4061 a9e0c397 Iustin Pop
                                          dev.logical_id[2]),
4062 a9e0c397 Iustin Pop
                              children=dev.children)
4063 3f78eef2 Iustin Pop
      if not _CreateBlockDevOnSecondary(cfg, new_node, instance,
4064 3f78eef2 Iustin Pop
                                        new_drbd, False,
4065 a9e0c397 Iustin Pop
                                      _GetInstanceInfoText(instance)):
4066 a9e0c397 Iustin Pop
        raise errors.OpExecError("Failed to create new DRBD on"
4067 a9e0c397 Iustin Pop
                                 " node '%s'" % new_node)
4068 a9e0c397 Iustin Pop
4069 0834c866 Iustin Pop
    for dev in instance.disks:
4070 a9e0c397 Iustin Pop
      # we have new devices, shutdown the drbd on the old secondary
4071 0834c866 Iustin Pop
      info("shutting down drbd for %s on old node" % dev.iv_name)
4072 a9e0c397 Iustin Pop
      cfg.SetDiskID(dev, old_node)
4073 a9e0c397 Iustin Pop
      if not rpc.call_blockdev_shutdown(old_node, dev):
4074 0834c866 Iustin Pop
        warning("Failed to shutdown drbd for %s on old node" % dev.iv_name,
4075 79caa9ed Guido Trotter
                hint="Please cleanup this device manually as soon as possible")
4076 a9e0c397 Iustin Pop
4077 642445d9 Iustin Pop
    info("detaching primary drbds from the network (=> standalone)")
4078 642445d9 Iustin Pop
    done = 0
4079 642445d9 Iustin Pop
    for dev in instance.disks:
4080 a9e0c397 Iustin Pop
      cfg.SetDiskID(dev, pri_node)
4081 642445d9 Iustin Pop
      # set the physical (unique in bdev terms) id to None, meaning
4082 642445d9 Iustin Pop
      # detach from network
4083 642445d9 Iustin Pop
      dev.physical_id = (None,) * len(dev.physical_id)
4084 642445d9 Iustin Pop
      # and 'find' the device, which will 'fix' it to match the
4085 642445d9 Iustin Pop
      # standalone state
4086 642445d9 Iustin Pop
      if rpc.call_blockdev_find(pri_node, dev):
4087 642445d9 Iustin Pop
        done += 1
4088 642445d9 Iustin Pop
      else:
4089 642445d9 Iustin Pop
        warning("Failed to detach drbd %s from network, unusual case" %
4090 642445d9 Iustin Pop
                dev.iv_name)
4091 642445d9 Iustin Pop
4092 642445d9 Iustin Pop
    if not done:
4093 642445d9 Iustin Pop
      # no detaches succeeded (very unlikely)
4094 642445d9 Iustin Pop
      raise errors.OpExecError("Can't detach at least one DRBD from old node")
4095 642445d9 Iustin Pop
4096 642445d9 Iustin Pop
    # if we managed to detach at least one, we update all the disks of
4097 642445d9 Iustin Pop
    # the instance to point to the new secondary
4098 642445d9 Iustin Pop
    info("updating instance configuration")
4099 642445d9 Iustin Pop
    for dev in instance.disks:
4100 642445d9 Iustin Pop
      dev.logical_id = (pri_node, new_node) + dev.logical_id[2:]
4101 a9e0c397 Iustin Pop
      cfg.SetDiskID(dev, pri_node)
4102 642445d9 Iustin Pop
    cfg.Update(instance)
4103 a9e0c397 Iustin Pop
4104 642445d9 Iustin Pop
    # and now perform the drbd attach
4105 642445d9 Iustin Pop
    info("attaching primary drbds to new secondary (standalone => connected)")
4106 642445d9 Iustin Pop
    failures = []
4107 642445d9 Iustin Pop
    for dev in instance.disks:
4108 642445d9 Iustin Pop
      info("attaching primary drbd for %s to new secondary node" % dev.iv_name)
4109 642445d9 Iustin Pop
      # since the attach is smart, it's enough to 'find' the device,
4110 642445d9 Iustin Pop
      # it will automatically activate the network, if the physical_id
4111 642445d9 Iustin Pop
      # is correct
4112 642445d9 Iustin Pop
      cfg.SetDiskID(dev, pri_node)
4113 642445d9 Iustin Pop
      if not rpc.call_blockdev_find(pri_node, dev):
4114 642445d9 Iustin Pop
        warning("can't attach drbd %s to new secondary!" % dev.iv_name,
4115 642445d9 Iustin Pop
                "please do a gnt-instance info to see the status of disks")
4116 a9e0c397 Iustin Pop
4117 a9e0c397 Iustin Pop
    # this can fail as the old devices are degraded and _WaitForSync
4118 a9e0c397 Iustin Pop
    # does a combined result over all disks, so we don't check its
4119 a9e0c397 Iustin Pop
    # return value
4120 5bfac263 Iustin Pop
    self.proc.LogStep(5, steps_total, "sync devices")
4121 5bfac263 Iustin Pop
    _WaitForSync(cfg, instance, self.proc, unlock=True)
4122 a9e0c397 Iustin Pop
4123 a9e0c397 Iustin Pop
    # so check manually all the devices
4124 a9e0c397 Iustin Pop
    for name, (dev, old_lvs) in iv_names.iteritems():
4125 a9e0c397 Iustin Pop
      cfg.SetDiskID(dev, pri_node)
4126 a9e0c397 Iustin Pop
      is_degr = rpc.call_blockdev_find(pri_node, dev)[5]
4127 a9e0c397 Iustin Pop
      if is_degr:
4128 a9e0c397 Iustin Pop
        raise errors.OpExecError("DRBD device %s is degraded!" % name)
4129 a9e0c397 Iustin Pop
4130 5bfac263 Iustin Pop
    self.proc.LogStep(6, steps_total, "removing old storage")
4131 a9e0c397 Iustin Pop
    for name, (dev, old_lvs) in iv_names.iteritems():
4132 0834c866 Iustin Pop
      info("remove logical volumes for %s" % name)
4133 a9e0c397 Iustin Pop
      for lv in old_lvs:
4134 a9e0c397 Iustin Pop
        cfg.SetDiskID(lv, old_node)
4135 a9e0c397 Iustin Pop
        if not rpc.call_blockdev_remove(old_node, lv):
4136 0834c866 Iustin Pop
          warning("Can't remove LV on old secondary",
4137 79caa9ed Guido Trotter
                  hint="Cleanup stale volumes by hand")
4138 a9e0c397 Iustin Pop
4139 a9e0c397 Iustin Pop
  def Exec(self, feedback_fn):
4140 a9e0c397 Iustin Pop
    """Execute disk replacement.
4141 a9e0c397 Iustin Pop

4142 a9e0c397 Iustin Pop
    This dispatches the disk replacement to the appropriate handler.
4143 a9e0c397 Iustin Pop

4144 a9e0c397 Iustin Pop
    """
4145 a9e0c397 Iustin Pop
    instance = self.instance
4146 a9e0c397 Iustin Pop
    if instance.disk_template == constants.DT_REMOTE_RAID1:
4147 a9e0c397 Iustin Pop
      fn = self._ExecRR1
4148 a9e0c397 Iustin Pop
    elif instance.disk_template == constants.DT_DRBD8:
4149 a9e0c397 Iustin Pop
      if self.op.remote_node is None:
4150 a9e0c397 Iustin Pop
        fn = self._ExecD8DiskOnly
4151 a9e0c397 Iustin Pop
      else:
4152 a9e0c397 Iustin Pop
        fn = self._ExecD8Secondary
4153 a9e0c397 Iustin Pop
    else:
4154 a9e0c397 Iustin Pop
      raise errors.ProgrammerError("Unhandled disk replacement case")
4155 a9e0c397 Iustin Pop
    return fn(feedback_fn)
4156 a9e0c397 Iustin Pop
4157 a8083063 Iustin Pop
4158 a8083063 Iustin Pop
class LUQueryInstanceData(NoHooksLU):
4159 a8083063 Iustin Pop
  """Query runtime instance data.
4160 a8083063 Iustin Pop

4161 a8083063 Iustin Pop
  """
4162 a8083063 Iustin Pop
  _OP_REQP = ["instances"]
4163 a8083063 Iustin Pop
4164 a8083063 Iustin Pop
  def CheckPrereq(self):
4165 a8083063 Iustin Pop
    """Check prerequisites.
4166 a8083063 Iustin Pop

4167 a8083063 Iustin Pop
    This only checks the optional instance list against the existing names.
4168 a8083063 Iustin Pop

4169 a8083063 Iustin Pop
    """
4170 a8083063 Iustin Pop
    if not isinstance(self.op.instances, list):
4171 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("Invalid argument type 'instances'")
4172 a8083063 Iustin Pop
    if self.op.instances:
4173 a8083063 Iustin Pop
      self.wanted_instances = []
4174 a8083063 Iustin Pop
      names = self.op.instances
4175 a8083063 Iustin Pop
      for name in names:
4176 a8083063 Iustin Pop
        instance = self.cfg.GetInstanceInfo(self.cfg.ExpandInstanceName(name))
4177 a8083063 Iustin Pop
        if instance is None:
4178 3ecf6786 Iustin Pop
          raise errors.OpPrereqError("No such instance name '%s'" % name)
4179 515207af Guido Trotter
        self.wanted_instances.append(instance)
4180 a8083063 Iustin Pop
    else:
4181 a8083063 Iustin Pop
      self.wanted_instances = [self.cfg.GetInstanceInfo(name) for name
4182 a8083063 Iustin Pop
                               in self.cfg.GetInstanceList()]
4183 a8083063 Iustin Pop
    return
4184 a8083063 Iustin Pop
4185 a8083063 Iustin Pop
4186 a8083063 Iustin Pop
  def _ComputeDiskStatus(self, instance, snode, dev):
4187 a8083063 Iustin Pop
    """Compute block device status.
4188 a8083063 Iustin Pop

4189 a8083063 Iustin Pop
    """
4190 a8083063 Iustin Pop
    self.cfg.SetDiskID(dev, instance.primary_node)
4191 a8083063 Iustin Pop
    dev_pstatus = rpc.call_blockdev_find(instance.primary_node, dev)
4192 a1f445d3 Iustin Pop
    if dev.dev_type in constants.LDS_DRBD:
4193 a8083063 Iustin Pop
      # we change the snode then (otherwise we use the one passed in)
4194 a8083063 Iustin Pop
      if dev.logical_id[0] == instance.primary_node:
4195 a8083063 Iustin Pop
        snode = dev.logical_id[1]
4196 a8083063 Iustin Pop
      else:
4197 a8083063 Iustin Pop
        snode = dev.logical_id[0]
4198 a8083063 Iustin Pop
4199 a8083063 Iustin Pop
    if snode:
4200 a8083063 Iustin Pop
      self.cfg.SetDiskID(dev, snode)
4201 a8083063 Iustin Pop
      dev_sstatus = rpc.call_blockdev_find(snode, dev)
4202 a8083063 Iustin Pop
    else:
4203 a8083063 Iustin Pop
      dev_sstatus = None
4204 a8083063 Iustin Pop
4205 a8083063 Iustin Pop
    if dev.children:
4206 a8083063 Iustin Pop
      dev_children = [self._ComputeDiskStatus(instance, snode, child)
4207 a8083063 Iustin Pop
                      for child in dev.children]
4208 a8083063 Iustin Pop
    else:
4209 a8083063 Iustin Pop
      dev_children = []
4210 a8083063 Iustin Pop
4211 a8083063 Iustin Pop
    data = {
4212 a8083063 Iustin Pop
      "iv_name": dev.iv_name,
4213 a8083063 Iustin Pop
      "dev_type": dev.dev_type,
4214 a8083063 Iustin Pop
      "logical_id": dev.logical_id,
4215 a8083063 Iustin Pop
      "physical_id": dev.physical_id,
4216 a8083063 Iustin Pop
      "pstatus": dev_pstatus,
4217 a8083063 Iustin Pop
      "sstatus": dev_sstatus,
4218 a8083063 Iustin Pop
      "children": dev_children,
4219 a8083063 Iustin Pop
      }
4220 a8083063 Iustin Pop
4221 a8083063 Iustin Pop
    return data
4222 a8083063 Iustin Pop
4223 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
4224 a8083063 Iustin Pop
    """Gather and return data"""
4225 a8083063 Iustin Pop
    result = {}
4226 a8083063 Iustin Pop
    for instance in self.wanted_instances:
4227 a8083063 Iustin Pop
      remote_info = rpc.call_instance_info(instance.primary_node,
4228 a8083063 Iustin Pop
                                                instance.name)
4229 a8083063 Iustin Pop
      if remote_info and "state" in remote_info:
4230 a8083063 Iustin Pop
        remote_state = "up"
4231 a8083063 Iustin Pop
      else:
4232 a8083063 Iustin Pop
        remote_state = "down"
4233 a8083063 Iustin Pop
      if instance.status == "down":
4234 a8083063 Iustin Pop
        config_state = "down"
4235 a8083063 Iustin Pop
      else:
4236 a8083063 Iustin Pop
        config_state = "up"
4237 a8083063 Iustin Pop
4238 a8083063 Iustin Pop
      disks = [self._ComputeDiskStatus(instance, None, device)
4239 a8083063 Iustin Pop
               for device in instance.disks]
4240 a8083063 Iustin Pop
4241 a8083063 Iustin Pop
      idict = {
4242 a8083063 Iustin Pop
        "name": instance.name,
4243 a8083063 Iustin Pop
        "config_state": config_state,
4244 a8083063 Iustin Pop
        "run_state": remote_state,
4245 a8083063 Iustin Pop
        "pnode": instance.primary_node,
4246 a8083063 Iustin Pop
        "snodes": instance.secondary_nodes,
4247 a8083063 Iustin Pop
        "os": instance.os,
4248 a8083063 Iustin Pop
        "memory": instance.memory,
4249 a8083063 Iustin Pop
        "nics": [(nic.mac, nic.ip, nic.bridge) for nic in instance.nics],
4250 a8083063 Iustin Pop
        "disks": disks,
4251 58acb49d Alexander Schreiber
        "network_port": instance.network_port,
4252 f55ff7ec Iustin Pop
        "vcpus": instance.vcpus,
4253 71aa8f73 Iustin Pop
        "kernel_path": instance.kernel_path,
4254 71aa8f73 Iustin Pop
        "initrd_path": instance.initrd_path,
4255 8ae6bb54 Iustin Pop
        "hvm_boot_order": instance.hvm_boot_order,
4256 a8083063 Iustin Pop
        }
4257 a8083063 Iustin Pop
4258 a8083063 Iustin Pop
      result[instance.name] = idict
4259 a8083063 Iustin Pop
4260 a8083063 Iustin Pop
    return result
4261 a8083063 Iustin Pop
4262 a8083063 Iustin Pop
4263 7767bbf5 Manuel Franceschini
class LUSetInstanceParams(LogicalUnit):
4264 a8083063 Iustin Pop
  """Modifies an instances's parameters.
4265 a8083063 Iustin Pop

4266 a8083063 Iustin Pop
  """
4267 a8083063 Iustin Pop
  HPATH = "instance-modify"
4268 a8083063 Iustin Pop
  HTYPE = constants.HTYPE_INSTANCE
4269 a8083063 Iustin Pop
  _OP_REQP = ["instance_name"]
4270 a8083063 Iustin Pop
4271 a8083063 Iustin Pop
  def BuildHooksEnv(self):
4272 a8083063 Iustin Pop
    """Build hooks env.
4273 a8083063 Iustin Pop

4274 a8083063 Iustin Pop
    This runs on the master, primary and secondaries.
4275 a8083063 Iustin Pop

4276 a8083063 Iustin Pop
    """
4277 396e1b78 Michael Hanselmann
    args = dict()
4278 a8083063 Iustin Pop
    if self.mem:
4279 396e1b78 Michael Hanselmann
      args['memory'] = self.mem
4280 a8083063 Iustin Pop
    if self.vcpus:
4281 396e1b78 Michael Hanselmann
      args['vcpus'] = self.vcpus
4282 ef756965 Iustin Pop
    if self.do_ip or self.do_bridge or self.mac:
4283 396e1b78 Michael Hanselmann
      if self.do_ip:
4284 396e1b78 Michael Hanselmann
        ip = self.ip
4285 396e1b78 Michael Hanselmann
      else:
4286 396e1b78 Michael Hanselmann
        ip = self.instance.nics[0].ip
4287 396e1b78 Michael Hanselmann
      if self.bridge:
4288 396e1b78 Michael Hanselmann
        bridge = self.bridge
4289 396e1b78 Michael Hanselmann
      else:
4290 396e1b78 Michael Hanselmann
        bridge = self.instance.nics[0].bridge
4291 ef756965 Iustin Pop
      if self.mac:
4292 ef756965 Iustin Pop
        mac = self.mac
4293 ef756965 Iustin Pop
      else:
4294 ef756965 Iustin Pop
        mac = self.instance.nics[0].mac
4295 ef756965 Iustin Pop
      args['nics'] = [(ip, bridge, mac)]
4296 396e1b78 Michael Hanselmann
    env = _BuildInstanceHookEnvByObject(self.instance, override=args)
4297 880478f8 Iustin Pop
    nl = [self.sstore.GetMasterNode(),
4298 a8083063 Iustin Pop
          self.instance.primary_node] + list(self.instance.secondary_nodes)
4299 a8083063 Iustin Pop
    return env, nl, nl
4300 a8083063 Iustin Pop
4301 a8083063 Iustin Pop
  def CheckPrereq(self):
4302 a8083063 Iustin Pop
    """Check prerequisites.
4303 a8083063 Iustin Pop

4304 a8083063 Iustin Pop
    This only checks the instance list against the existing names.
4305 a8083063 Iustin Pop

4306 a8083063 Iustin Pop
    """
4307 a8083063 Iustin Pop
    self.mem = getattr(self.op, "mem", None)
4308 a8083063 Iustin Pop
    self.vcpus = getattr(self.op, "vcpus", None)
4309 a8083063 Iustin Pop
    self.ip = getattr(self.op, "ip", None)
4310 1862d460 Alexander Schreiber
    self.mac = getattr(self.op, "mac", None)
4311 a8083063 Iustin Pop
    self.bridge = getattr(self.op, "bridge", None)
4312 973d7867 Iustin Pop
    self.kernel_path = getattr(self.op, "kernel_path", None)
4313 973d7867 Iustin Pop
    self.initrd_path = getattr(self.op, "initrd_path", None)
4314 25c5878d Alexander Schreiber
    self.hvm_boot_order = getattr(self.op, "hvm_boot_order", None)
4315 7767bbf5 Manuel Franceschini
    all_params = [self.mem, self.vcpus, self.ip, self.bridge, self.mac,
4316 7767bbf5 Manuel Franceschini
                  self.kernel_path, self.initrd_path, self.hvm_boot_order]
4317 7767bbf5 Manuel Franceschini
    if all_params.count(None) == len(all_params):
4318 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("No changes submitted")
4319 a8083063 Iustin Pop
    if self.mem is not None:
4320 a8083063 Iustin Pop
      try:
4321 a8083063 Iustin Pop
        self.mem = int(self.mem)
4322 a8083063 Iustin Pop
      except ValueError, err:
4323 3ecf6786 Iustin Pop
        raise errors.OpPrereqError("Invalid memory size: %s" % str(err))
4324 a8083063 Iustin Pop
    if self.vcpus is not None:
4325 a8083063 Iustin Pop
      try:
4326 a8083063 Iustin Pop
        self.vcpus = int(self.vcpus)
4327 a8083063 Iustin Pop
      except ValueError, err:
4328 3ecf6786 Iustin Pop
        raise errors.OpPrereqError("Invalid vcpus number: %s" % str(err))
4329 a8083063 Iustin Pop
    if self.ip is not None:
4330 a8083063 Iustin Pop
      self.do_ip = True
4331 a8083063 Iustin Pop
      if self.ip.lower() == "none":
4332 a8083063 Iustin Pop
        self.ip = None
4333 a8083063 Iustin Pop
      else:
4334 a8083063 Iustin Pop
        if not utils.IsValidIP(self.ip):
4335 3ecf6786 Iustin Pop
          raise errors.OpPrereqError("Invalid IP address '%s'." % self.ip)
4336 a8083063 Iustin Pop
    else:
4337 a8083063 Iustin Pop
      self.do_ip = False
4338 ecb215b5 Michael Hanselmann
    self.do_bridge = (self.bridge is not None)
4339 1862d460 Alexander Schreiber
    if self.mac is not None:
4340 1862d460 Alexander Schreiber
      if self.cfg.IsMacInUse(self.mac):
4341 1862d460 Alexander Schreiber
        raise errors.OpPrereqError('MAC address %s already in use in cluster' %
4342 1862d460 Alexander Schreiber
                                   self.mac)
4343 1862d460 Alexander Schreiber
      if not utils.IsValidMac(self.mac):
4344 1862d460 Alexander Schreiber
        raise errors.OpPrereqError('Invalid MAC address %s' % self.mac)
4345 a8083063 Iustin Pop
4346 973d7867 Iustin Pop
    if self.kernel_path is not None:
4347 973d7867 Iustin Pop
      self.do_kernel_path = True
4348 973d7867 Iustin Pop
      if self.kernel_path == constants.VALUE_NONE:
4349 973d7867 Iustin Pop
        raise errors.OpPrereqError("Can't set instance to no kernel")
4350 973d7867 Iustin Pop
4351 973d7867 Iustin Pop
      if self.kernel_path != constants.VALUE_DEFAULT:
4352 973d7867 Iustin Pop
        if not os.path.isabs(self.kernel_path):
4353 ba4b62cf Iustin Pop
          raise errors.OpPrereqError("The kernel path must be an absolute"
4354 973d7867 Iustin Pop
                                    " filename")
4355 8cafeb26 Iustin Pop
    else:
4356 8cafeb26 Iustin Pop
      self.do_kernel_path = False
4357 973d7867 Iustin Pop
4358 973d7867 Iustin Pop
    if self.initrd_path is not None:
4359 973d7867 Iustin Pop
      self.do_initrd_path = True
4360 973d7867 Iustin Pop
      if self.initrd_path not in (constants.VALUE_NONE,
4361 973d7867 Iustin Pop
                                  constants.VALUE_DEFAULT):
4362 2bc22872 Iustin Pop
        if not os.path.isabs(self.initrd_path):
4363 ba4b62cf Iustin Pop
          raise errors.OpPrereqError("The initrd path must be an absolute"
4364 973d7867 Iustin Pop
                                    " filename")
4365 8cafeb26 Iustin Pop
    else:
4366 8cafeb26 Iustin Pop
      self.do_initrd_path = False
4367 973d7867 Iustin Pop
4368 25c5878d Alexander Schreiber
    # boot order verification
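    # the check below relies on str.strip: a valid order contains only the
    # characters "acdn", so stripping them leaves an empty string, e.g.
    # "dc".strip("acdn") == "" (accepted) while "dx".strip("acdn") == "x"
    # (rejected)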
4369 25c5878d Alexander Schreiber
    if self.hvm_boot_order is not None:
4370 25c5878d Alexander Schreiber
      if self.hvm_boot_order != constants.VALUE_DEFAULT:
4371 25c5878d Alexander Schreiber
        if len(self.hvm_boot_order.strip("acdn")) != 0:
4372 25c5878d Alexander Schreiber
          raise errors.OpPrereqError("invalid boot order specified,"
4373 25c5878d Alexander Schreiber
                                     " must be one or more of [acdn]"
4374 25c5878d Alexander Schreiber
                                     " or 'default'")
4375 25c5878d Alexander Schreiber
4376 a8083063 Iustin Pop
    instance = self.cfg.GetInstanceInfo(
4377 a8083063 Iustin Pop
      self.cfg.ExpandInstanceName(self.op.instance_name))
4378 a8083063 Iustin Pop
    if instance is None:
4379 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("No such instance name '%s'" %
4380 3ecf6786 Iustin Pop
                                 self.op.instance_name)
4381 a8083063 Iustin Pop
    self.op.instance_name = instance.name
4382 a8083063 Iustin Pop
    self.instance = instance
4383 a8083063 Iustin Pop
    return
4384 a8083063 Iustin Pop
4385 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
4386 a8083063 Iustin Pop
    """Modifies an instance.
4387 a8083063 Iustin Pop

4388 a8083063 Iustin Pop
    All parameters take effect only at the next restart of the instance.
4389 a8083063 Iustin Pop
    """
4390 a8083063 Iustin Pop
    result = []
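    # result collects (parameter, new value) pairs for everything that was
    # actually changed, e.g. [("mem", 512), ("bridge", "xen-br1")]
    # (illustrative values)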
4391 a8083063 Iustin Pop
    instance = self.instance
4392 a8083063 Iustin Pop
    if self.mem:
4393 a8083063 Iustin Pop
      instance.memory = self.mem
4394 a8083063 Iustin Pop
      result.append(("mem", self.mem))
4395 a8083063 Iustin Pop
    if self.vcpus:
4396 a8083063 Iustin Pop
      instance.vcpus = self.vcpus
4397 a8083063 Iustin Pop
      result.append(("vcpus",  self.vcpus))
4398 a8083063 Iustin Pop
    if self.do_ip:
4399 a8083063 Iustin Pop
      instance.nics[0].ip = self.ip
4400 a8083063 Iustin Pop
      result.append(("ip", self.ip))
4401 a8083063 Iustin Pop
    if self.bridge:
4402 a8083063 Iustin Pop
      instance.nics[0].bridge = self.bridge
4403 a8083063 Iustin Pop
      result.append(("bridge", self.bridge))
4404 1862d460 Alexander Schreiber
    if self.mac:
4405 1862d460 Alexander Schreiber
      instance.nics[0].mac = self.mac
4406 1862d460 Alexander Schreiber
      result.append(("mac", self.mac))
4407 973d7867 Iustin Pop
    if self.do_kernel_path:
4408 973d7867 Iustin Pop
      instance.kernel_path = self.kernel_path
4409 973d7867 Iustin Pop
      result.append(("kernel_path", self.kernel_path))
4410 973d7867 Iustin Pop
    if self.do_initrd_path:
4411 973d7867 Iustin Pop
      instance.initrd_path = self.initrd_path
4412 973d7867 Iustin Pop
      result.append(("initrd_path", self.initrd_path))
4413 25c5878d Alexander Schreiber
    if self.hvm_boot_order:
4414 25c5878d Alexander Schreiber
      if self.hvm_boot_order == constants.VALUE_DEFAULT:
4415 25c5878d Alexander Schreiber
        instance.hvm_boot_order = None
4416 25c5878d Alexander Schreiber
      else:
4417 25c5878d Alexander Schreiber
        instance.hvm_boot_order = self.hvm_boot_order
4418 25c5878d Alexander Schreiber
      result.append(("hvm_boot_order", self.hvm_boot_order))
4419 a8083063 Iustin Pop
4420 a8083063 Iustin Pop
    self.cfg.AddInstance(instance)
4421 a8083063 Iustin Pop
4422 a8083063 Iustin Pop
    return result
4423 a8083063 Iustin Pop
4424 a8083063 Iustin Pop
4425 a8083063 Iustin Pop
class LUQueryExports(NoHooksLU):
4426 a8083063 Iustin Pop
  """Query the exports list
4427 a8083063 Iustin Pop

4428 a8083063 Iustin Pop
  """
4429 a8083063 Iustin Pop
  _OP_REQP = []
4430 a8083063 Iustin Pop
4431 a8083063 Iustin Pop
  def CheckPrereq(self):
4432 a8083063 Iustin Pop
    """Check that the nodelist contains only existing nodes.
4433 a8083063 Iustin Pop

4434 a8083063 Iustin Pop
    """
4435 dcb93971 Michael Hanselmann
    self.nodes = _GetWantedNodes(self, getattr(self.op, "nodes", None))
4436 a8083063 Iustin Pop
4437 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
4438 a8083063 Iustin Pop
    """Compute the list of all the exported system images.
4439 a8083063 Iustin Pop

4440 a8083063 Iustin Pop
    Returns:
4441 a8083063 Iustin Pop
      a dictionary with the structure node->(export-list)
4442 a8083063 Iustin Pop
      where export-list is a list of the instances exported on
4443 a8083063 Iustin Pop
      that node.
4444 a8083063 Iustin Pop

4445 a8083063 Iustin Pop
    """
4446 a7ba5e53 Iustin Pop
    return rpc.call_export_list(self.nodes)
4447 a8083063 Iustin Pop
4448 a8083063 Iustin Pop
4449 a8083063 Iustin Pop
class LUExportInstance(LogicalUnit):
4450 a8083063 Iustin Pop
  """Export an instance to an image in the cluster.
4451 a8083063 Iustin Pop

4452 a8083063 Iustin Pop
  """
4453 a8083063 Iustin Pop
  HPATH = "instance-export"
4454 a8083063 Iustin Pop
  HTYPE = constants.HTYPE_INSTANCE
4455 a8083063 Iustin Pop
  _OP_REQP = ["instance_name", "target_node", "shutdown"]
4456 a8083063 Iustin Pop
4457 a8083063 Iustin Pop
  def BuildHooksEnv(self):
4458 a8083063 Iustin Pop
    """Build hooks env.
4459 a8083063 Iustin Pop

4460 a8083063 Iustin Pop
    This will run on the master, primary node and target node.
4461 a8083063 Iustin Pop

4462 a8083063 Iustin Pop
    """
4463 a8083063 Iustin Pop
    env = {
4464 a8083063 Iustin Pop
      "EXPORT_NODE": self.op.target_node,
4465 a8083063 Iustin Pop
      "EXPORT_DO_SHUTDOWN": self.op.shutdown,
4466 a8083063 Iustin Pop
      }
4467 396e1b78 Michael Hanselmann
    env.update(_BuildInstanceHookEnvByObject(self.instance))
4468 880478f8 Iustin Pop
    nl = [self.sstore.GetMasterNode(), self.instance.primary_node,
4469 a8083063 Iustin Pop
          self.op.target_node]
4470 a8083063 Iustin Pop
    return env, nl, nl
4471 a8083063 Iustin Pop
4472 a8083063 Iustin Pop
  def CheckPrereq(self):
4473 a8083063 Iustin Pop
    """Check prerequisites.
4474 a8083063 Iustin Pop

4475 9ac99fda Guido Trotter
    This checks that the instance and node names are valid.
4476 a8083063 Iustin Pop

4477 a8083063 Iustin Pop
    """
4478 a8083063 Iustin Pop
    instance_name = self.cfg.ExpandInstanceName(self.op.instance_name)
4479 a8083063 Iustin Pop
    self.instance = self.cfg.GetInstanceInfo(instance_name)
4480 a8083063 Iustin Pop
    if self.instance is None:
4481 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("Instance '%s' not found" %
4482 3ecf6786 Iustin Pop
                                 self.op.instance_name)
4483 a8083063 Iustin Pop
4484 a8083063 Iustin Pop
    # node verification
4485 a8083063 Iustin Pop
    dst_node_short = self.cfg.ExpandNodeName(self.op.target_node)
4486 a8083063 Iustin Pop
    self.dst_node = self.cfg.GetNodeInfo(dst_node_short)
4487 a8083063 Iustin Pop
4488 a8083063 Iustin Pop
    if self.dst_node is None:
4489 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("Destination node '%s' is unknown." %
4490 3ecf6786 Iustin Pop
                                 self.op.target_node)
4491 a8083063 Iustin Pop
    self.op.target_node = self.dst_node.name
4492 a8083063 Iustin Pop
4493 b6023d6c Manuel Franceschini
    # instance disk type verification
4494 b6023d6c Manuel Franceschini
    for disk in self.instance.disks:
4495 b6023d6c Manuel Franceschini
      if disk.dev_type == constants.LD_FILE:
4496 b6023d6c Manuel Franceschini
        raise errors.OpPrereqError("Export not supported for instances with"
4497 b6023d6c Manuel Franceschini
                                   " file-based disks")
4498 b6023d6c Manuel Franceschini
4499 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
4500 a8083063 Iustin Pop
    """Export an instance to an image in the cluster.
4501 a8083063 Iustin Pop

4502 a8083063 Iustin Pop
    """
4503 a8083063 Iustin Pop
    instance = self.instance
4504 a8083063 Iustin Pop
    dst_node = self.dst_node
4505 a8083063 Iustin Pop
    src_node = instance.primary_node
4506 a8083063 Iustin Pop
    if self.op.shutdown:
4507 fb300fb7 Guido Trotter
      # shutdown the instance, but not the disks
4508 fb300fb7 Guido Trotter
      if not rpc.call_instance_shutdown(src_node, instance):
4509 fb300fb7 Guido Trotter
         raise errors.OpExecError("Could not shutdown instance %s on node %s" %
4510 b4de68a9 Iustin Pop
                                  (instance.name, src_node))
4511 a8083063 Iustin Pop
4512 a8083063 Iustin Pop
    vgname = self.cfg.GetVGName()
4513 a8083063 Iustin Pop
4514 a8083063 Iustin Pop
    snap_disks = []
4515 a8083063 Iustin Pop
4516 a8083063 Iustin Pop
    try:
4517 a8083063 Iustin Pop
      for disk in instance.disks:
4518 a8083063 Iustin Pop
        if disk.iv_name == "sda":
4519 a8083063 Iustin Pop
          # new_dev_name will be a snapshot of an lvm leaf of the one we passed
4520 a8083063 Iustin Pop
          new_dev_name = rpc.call_blockdev_snapshot(src_node, disk)
4521 a8083063 Iustin Pop
4522 a8083063 Iustin Pop
          if not new_dev_name:
4523 a8083063 Iustin Pop
            logger.Error("could not snapshot block device %s on node %s" %
4524 a8083063 Iustin Pop
                         (disk.logical_id[1], src_node))
4525 a8083063 Iustin Pop
          else:
4526 fe96220b Iustin Pop
            new_dev = objects.Disk(dev_type=constants.LD_LV, size=disk.size,
4527 a8083063 Iustin Pop
                                      logical_id=(vgname, new_dev_name),
4528 a8083063 Iustin Pop
                                      physical_id=(vgname, new_dev_name),
4529 a8083063 Iustin Pop
                                      iv_name=disk.iv_name)
4530 a8083063 Iustin Pop
            snap_disks.append(new_dev)
4531 a8083063 Iustin Pop
4532 a8083063 Iustin Pop
    finally:
4533 fb300fb7 Guido Trotter
      if self.op.shutdown and instance.status == "up":
4534 fb300fb7 Guido Trotter
        if not rpc.call_instance_start(src_node, instance, None):
4535 fb300fb7 Guido Trotter
          _ShutdownInstanceDisks(instance, self.cfg)
4536 fb300fb7 Guido Trotter
          raise errors.OpExecError("Could not start instance")
4537 a8083063 Iustin Pop
4538 a8083063 Iustin Pop
    # TODO: check for size
4539 a8083063 Iustin Pop
4540 a8083063 Iustin Pop
    for dev in snap_disks:
4541 16687b98 Manuel Franceschini
      if not rpc.call_snapshot_export(src_node, dev, dst_node.name, instance):
4542 16687b98 Manuel Franceschini
        logger.Error("could not export block device %s from node %s to node %s"
4543 16687b98 Manuel Franceschini
                     % (dev.logical_id[1], src_node, dst_node.name))
4544 a8083063 Iustin Pop
      if not rpc.call_blockdev_remove(src_node, dev):
4545 16687b98 Manuel Franceschini
        logger.Error("could not remove snapshot block device %s from node %s" %
4546 16687b98 Manuel Franceschini
                     (dev.logical_id[1], src_node))
4547 a8083063 Iustin Pop
4548 a8083063 Iustin Pop
    if not rpc.call_finalize_export(dst_node.name, instance, snap_disks):
4549 a8083063 Iustin Pop
      logger.Error("could not finalize export for instance %s on node %s" %
4550 a8083063 Iustin Pop
                   (instance.name, dst_node.name))
4551 a8083063 Iustin Pop
4552 a8083063 Iustin Pop
    nodelist = self.cfg.GetNodeList()
4553 a8083063 Iustin Pop
    nodelist.remove(dst_node.name)
4554 a8083063 Iustin Pop
4555 a8083063 Iustin Pop
    # on one-node clusters nodelist will be empty after the removal
4556 a8083063 Iustin Pop
    # if we proceed the backup would be removed because OpQueryExports
4557 a8083063 Iustin Pop
    # substitutes an empty list with the full cluster node list.
4558 a8083063 Iustin Pop
    if nodelist:
4559 a8083063 Iustin Pop
      op = opcodes.OpQueryExports(nodes=nodelist)
4560 5bfac263 Iustin Pop
      exportlist = self.proc.ChainOpCode(op)
4561 a8083063 Iustin Pop
      for node in exportlist:
4562 a8083063 Iustin Pop
        if instance.name in exportlist[node]:
4563 a8083063 Iustin Pop
          if not rpc.call_export_remove(node, instance.name):
4564 a8083063 Iustin Pop
            logger.Error("could not remove older export for instance %s"
4565 a8083063 Iustin Pop
                         " on node %s" % (instance.name, node))
4566 5c947f38 Iustin Pop
4567 5c947f38 Iustin Pop
4568 9ac99fda Guido Trotter
class LURemoveExport(NoHooksLU):
4569 9ac99fda Guido Trotter
  """Remove exports related to the named instance.
4570 9ac99fda Guido Trotter

4571 9ac99fda Guido Trotter
  """
4572 9ac99fda Guido Trotter
  _OP_REQP = ["instance_name"]
4573 9ac99fda Guido Trotter
4574 9ac99fda Guido Trotter
  def CheckPrereq(self):
4575 9ac99fda Guido Trotter
    """Check prerequisites.
4576 9ac99fda Guido Trotter
    """
4577 9ac99fda Guido Trotter
    pass
4578 9ac99fda Guido Trotter
4579 9ac99fda Guido Trotter
  def Exec(self, feedback_fn):
4580 9ac99fda Guido Trotter
    """Remove any export.
4581 9ac99fda Guido Trotter

4582 9ac99fda Guido Trotter
    """
4583 9ac99fda Guido Trotter
    instance_name = self.cfg.ExpandInstanceName(self.op.instance_name)
4584 9ac99fda Guido Trotter
    # If the instance was not found we'll try with the name that was passed in.
4585 9ac99fda Guido Trotter
    # This will only work if it was an FQDN, though.
4586 9ac99fda Guido Trotter
    fqdn_warn = False
4587 9ac99fda Guido Trotter
    if not instance_name:
4588 9ac99fda Guido Trotter
      fqdn_warn = True
4589 9ac99fda Guido Trotter
      instance_name = self.op.instance_name
4590 9ac99fda Guido Trotter
4591 9ac99fda Guido Trotter
    op = opcodes.OpQueryExports(nodes=[])
4592 9ac99fda Guido Trotter
    exportlist = self.proc.ChainOpCode(op)
4593 9ac99fda Guido Trotter
    found = False
4594 9ac99fda Guido Trotter
    for node in exportlist:
4595 9ac99fda Guido Trotter
      if instance_name in exportlist[node]:
4596 9ac99fda Guido Trotter
        found = True
4597 9ac99fda Guido Trotter
        if not rpc.call_export_remove(node, instance_name):
4598 9ac99fda Guido Trotter
          logger.Error("could not remove export for instance %s"
4599 9ac99fda Guido Trotter
                       " on node %s" % (instance_name, node))
4600 9ac99fda Guido Trotter
4601 9ac99fda Guido Trotter
    if fqdn_warn and not found:
4602 9ac99fda Guido Trotter
      feedback_fn("Export not found. If trying to remove an export belonging"
4603 9ac99fda Guido Trotter
                  " to a deleted instance please use its Fully Qualified"
4604 9ac99fda Guido Trotter
                  " Domain Name.")
4605 9ac99fda Guido Trotter
4606 9ac99fda Guido Trotter
4607 5c947f38 Iustin Pop
class TagsLU(NoHooksLU):
4608 5c947f38 Iustin Pop
  """Generic tags LU.
4609 5c947f38 Iustin Pop

4610 5c947f38 Iustin Pop
  This is an abstract class which is the parent of all the other tags LUs.
4611 5c947f38 Iustin Pop

4612 5c947f38 Iustin Pop
  """
4613 5c947f38 Iustin Pop
  def CheckPrereq(self):
4614 5c947f38 Iustin Pop
    """Check prerequisites.
4615 5c947f38 Iustin Pop

4616 5c947f38 Iustin Pop
    """
4617 5c947f38 Iustin Pop
    if self.op.kind == constants.TAG_CLUSTER:
4618 5c947f38 Iustin Pop
      self.target = self.cfg.GetClusterInfo()
4619 5c947f38 Iustin Pop
    elif self.op.kind == constants.TAG_NODE:
4620 5c947f38 Iustin Pop
      name = self.cfg.ExpandNodeName(self.op.name)
4621 5c947f38 Iustin Pop
      if name is None:
4622 3ecf6786 Iustin Pop
        raise errors.OpPrereqError("Invalid node name (%s)" %
4623 3ecf6786 Iustin Pop
                                   (self.op.name,))
4624 5c947f38 Iustin Pop
      self.op.name = name
4625 5c947f38 Iustin Pop
      self.target = self.cfg.GetNodeInfo(name)
4626 5c947f38 Iustin Pop
    elif self.op.kind == constants.TAG_INSTANCE:
4627 8f684e16 Iustin Pop
      name = self.cfg.ExpandInstanceName(self.op.name)
4628 5c947f38 Iustin Pop
      if name is None:
4629 3ecf6786 Iustin Pop
        raise errors.OpPrereqError("Invalid instance name (%s)" %
4630 3ecf6786 Iustin Pop
                                   (self.op.name,))
4631 5c947f38 Iustin Pop
      self.op.name = name
4632 5c947f38 Iustin Pop
      self.target = self.cfg.GetInstanceInfo(name)
4633 5c947f38 Iustin Pop
    else:
4634 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("Wrong tag type requested (%s)" %
4635 3ecf6786 Iustin Pop
                                 str(self.op.kind))
4636 5c947f38 Iustin Pop
4637 5c947f38 Iustin Pop
4638 5c947f38 Iustin Pop
class LUGetTags(TagsLU):
4639 5c947f38 Iustin Pop
  """Returns the tags of a given object.
4640 5c947f38 Iustin Pop

4641 5c947f38 Iustin Pop
  """
4642 5c947f38 Iustin Pop
  _OP_REQP = ["kind", "name"]
4643 5c947f38 Iustin Pop
4644 5c947f38 Iustin Pop
  def Exec(self, feedback_fn):
4645 5c947f38 Iustin Pop
    """Returns the tag list.
4646 5c947f38 Iustin Pop

4647 5c947f38 Iustin Pop
    """
4648 5c947f38 Iustin Pop
    return self.target.GetTags()
4649 5c947f38 Iustin Pop
4650 5c947f38 Iustin Pop
4651 73415719 Iustin Pop
class LUSearchTags(NoHooksLU):
4652 73415719 Iustin Pop
  """Searches the tags for a given pattern.
4653 73415719 Iustin Pop

4654 73415719 Iustin Pop
  """
4655 73415719 Iustin Pop
  _OP_REQP = ["pattern"]
4656 73415719 Iustin Pop
4657 73415719 Iustin Pop
  def CheckPrereq(self):
4658 73415719 Iustin Pop
    """Check prerequisites.
4659 73415719 Iustin Pop

4660 73415719 Iustin Pop
    This checks the pattern passed for validity by compiling it.
4661 73415719 Iustin Pop

4662 73415719 Iustin Pop
    """
4663 73415719 Iustin Pop
    try:
4664 73415719 Iustin Pop
      self.re = re.compile(self.op.pattern)
4665 73415719 Iustin Pop
    except re.error, err:
4666 73415719 Iustin Pop
      raise errors.OpPrereqError("Invalid search pattern '%s': %s" %
4667 73415719 Iustin Pop
                                 (self.op.pattern, err))
4668 73415719 Iustin Pop
4669 73415719 Iustin Pop
  def Exec(self, feedback_fn):
4670 73415719 Iustin Pop
    """Returns the tag list.
4671 73415719 Iustin Pop

4672 73415719 Iustin Pop
    """
4673 73415719 Iustin Pop
    cfg = self.cfg
4674 73415719 Iustin Pop
    tgts = [("/cluster", cfg.GetClusterInfo())]
4675 73415719 Iustin Pop
    ilist = [cfg.GetInstanceInfo(name) for name in cfg.GetInstanceList()]
4676 73415719 Iustin Pop
    tgts.extend([("/instances/%s" % i.name, i) for i in ilist])
4677 73415719 Iustin Pop
    nlist = [cfg.GetNodeInfo(name) for name in cfg.GetNodeList()]
4678 73415719 Iustin Pop
    tgts.extend([("/nodes/%s" % n.name, n) for n in nlist])
4679 73415719 Iustin Pop
    results = []
4680 73415719 Iustin Pop
    for path, target in tgts:
4681 73415719 Iustin Pop
      for tag in target.GetTags():
4682 73415719 Iustin Pop
        if self.re.search(tag):
4683 73415719 Iustin Pop
          results.append((path, tag))
4684 73415719 Iustin Pop
    return results


class LUAddTags(TagsLU):
  """Sets one or more tags on a given object.

  """
  _OP_REQP = ["kind", "name", "tags"]

  def CheckPrereq(self):
    """Check prerequisites.

    This checks the type and length of the tags passed.

    """
    TagsLU.CheckPrereq(self)
    for tag in self.op.tags:
      objects.TaggableObject.ValidateTag(tag)

  def Exec(self, feedback_fn):
    """Sets the tags.

    """
    try:
      for tag in self.op.tags:
        self.target.AddTag(tag)
    except errors.TagError, err:
      raise errors.OpExecError("Error while setting tag: %s" % str(err))
    try:
      self.cfg.Update(self.target)
    except errors.ConfigurationError:
      raise errors.OpRetryError("There has been a modification to the"
                                " config file and the operation has been"
                                " aborted. Please retry.")


class LUDelTags(TagsLU):
  """Delete a list of tags from a given object.

  """
  _OP_REQP = ["kind", "name", "tags"]

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that we have the given tags.

    """
    TagsLU.CheckPrereq(self)
    for tag in self.op.tags:
      objects.TaggableObject.ValidateTag(tag)
    del_tags = frozenset(self.op.tags)
    cur_tags = self.target.GetTags()
    if not del_tags <= cur_tags:
      diff_tags = del_tags - cur_tags
      diff_names = ["'%s'" % tag for tag in diff_tags]
      diff_names.sort()
      raise errors.OpPrereqError("Tag(s) %s not found" %
                                 (",".join(diff_names)))

  def Exec(self, feedback_fn):
    """Remove the tags from the object.

    """
    for tag in self.op.tags:
      self.target.RemoveTag(tag)
    try:
      self.cfg.Update(self.target)
    except errors.ConfigurationError:
      raise errors.OpRetryError("There has been a modification to the"
                                " config file and the operation has been"
                                " aborted. Please retry.")


class LUTestDelay(NoHooksLU):
  """Sleep for a specified amount of time.

  This LU sleeps on the master and/or nodes for a specified amount of
  time.

  """
  _OP_REQP = ["duration", "on_master", "on_nodes"]

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that we have a good list of nodes and/or the duration
    is valid.

    """

    if self.op.on_nodes:
      self.op.on_nodes = _GetWantedNodes(self, self.op.on_nodes)

  def Exec(self, feedback_fn):
    """Do the actual sleep.

    """
    if self.op.on_master:
      if not utils.TestDelay(self.op.duration):
        raise errors.OpExecError("Error during master delay test")
    if self.op.on_nodes:
      result = rpc.call_test_delay(self.op.on_nodes, self.op.duration)
      if not result:
        raise errors.OpExecError("Complete failure from rpc call")
      for node, node_result in result.items():
        if not node_result:
          raise errors.OpExecError("Failure during rpc call to node %s,"
                                   " result: %s" % (node, node_result))


class IAllocator(object):
  """IAllocator framework.

  An IAllocator instance has four sets of attributes:
    - cfg/sstore that are needed to query the cluster
    - input data (all members of the mode-specific _ALLO_KEYS or
      _RELO_KEYS class attribute are required)
    - four buffer attributes (in_text, in_data, out_text, out_data),
      that represent the input (to the external script) in text and
      data structure format, and the output from it, again in two
      formats
    - the result variables from the script (success, info, nodes) for
      easy usage

  """
  _ALLO_KEYS = [
    "mem_size", "disks", "disk_template",
    "os", "tags", "nics", "vcpus",
    ]
  _RELO_KEYS = [
    "relocate_from",
    ]

  def __init__(self, cfg, sstore, mode, name, **kwargs):
    self.cfg = cfg
    self.sstore = sstore
    # init buffer variables
    self.in_text = self.out_text = self.in_data = self.out_data = None
    # init all input fields so that pylint is happy
    self.mode = mode
    self.name = name
    self.mem_size = self.disks = self.disk_template = None
    self.os = self.tags = self.nics = self.vcpus = None
    self.relocate_from = None
    # computed fields
    self.required_nodes = None
    # init result fields
    self.success = self.info = self.nodes = None
    if self.mode == constants.IALLOCATOR_MODE_ALLOC:
      keyset = self._ALLO_KEYS
    elif self.mode == constants.IALLOCATOR_MODE_RELOC:
      keyset = self._RELO_KEYS
    else:
      raise errors.ProgrammerError("Unknown mode '%s' passed to the"
                                   " IAllocator" % self.mode)
    for key in kwargs:
      if key not in keyset:
        raise errors.ProgrammerError("Invalid input parameter '%s' to"
                                     " IAllocator" % key)
      setattr(self, key, kwargs[key])
    for key in keyset:
      if key not in kwargs:
        raise errors.ProgrammerError("Missing input parameter '%s' to"
                                     " IAllocator" % key)
    self._BuildInputData()
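  # Illustrative usage sketch of how a logical unit can drive this class
  # (compare LUTestAllocator.Exec below); every concrete value here is
  # an invented example:
  #
  #   ial = IAllocator(self.cfg, self.sstore,
  #                    mode=constants.IALLOCATOR_MODE_ALLOC,
  #                    name="inst1.example.com", mem_size=512, vcpus=1,
  #                    disks=[{"size": 1024, "mode": "w"},
  #                           {"size": 2048, "mode": "w"}],
  #                    disk_template=constants.DT_PLAIN,
  #                    os="debian-etch", tags=[],
  #                    nics=[{"mac": "auto", "ip": None,
  #                           "bridge": "xen-br0"}])
  #   ial.Run("my-allocator-script")
  #   if ial.success:
  #     target_nodes = ial.nodes
  #
  # The constructor builds self.in_text; Run() hands it to the external
  # allocator and _ValidateResult() fills in success, info and nodes.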

  def _ComputeClusterData(self):
    """Compute the generic allocator input data.

    This is the data that is independent of the actual operation.

    """
    cfg = self.cfg
    # cluster data
    data = {
      "version": 1,
      "cluster_name": self.sstore.GetClusterName(),
      "cluster_tags": list(cfg.GetClusterInfo().GetTags()),
      "hypervisor_type": self.sstore.GetHypervisorType(),
      # we don't have job IDs
      }

    i_list = [cfg.GetInstanceInfo(iname) for iname in cfg.GetInstanceList()]

    # node data
    node_results = {}
    node_list = cfg.GetNodeList()
    node_data = rpc.call_node_info(node_list, cfg.GetVGName())
    for nname in node_list:
      ninfo = cfg.GetNodeInfo(nname)
      if nname not in node_data or not isinstance(node_data[nname], dict):
        raise errors.OpExecError("Can't get data for node %s" % nname)
      remote_info = node_data[nname]
      for attr in ['memory_total', 'memory_free', 'memory_dom0',
                   'vg_size', 'vg_free']:
        if attr not in remote_info:
          raise errors.OpExecError("Node '%s' didn't return attribute '%s'" %
                                   (nname, attr))
        try:
          remote_info[attr] = int(remote_info[attr])
        except ValueError, err:
          raise errors.OpExecError("Node '%s' returned invalid value for '%s':"
                                   " %s" % (nname, attr, str(err)))
      # compute memory used by primary instances
      i_p_mem = i_p_up_mem = 0
      for iinfo in i_list:
        if iinfo.primary_node == nname:
          i_p_mem += iinfo.memory
          if iinfo.status == "up":
            i_p_up_mem += iinfo.memory

      # build the per-node result dictionary
      pnr = {
        "tags": list(ninfo.GetTags()),
        "total_memory": remote_info['memory_total'],
        "reserved_memory": remote_info['memory_dom0'],
        "free_memory": remote_info['memory_free'],
        "i_pri_memory": i_p_mem,
        "i_pri_up_memory": i_p_up_mem,
        "total_disk": remote_info['vg_size'],
        "free_disk": remote_info['vg_free'],
        "primary_ip": ninfo.primary_ip,
        "secondary_ip": ninfo.secondary_ip,
        }
      node_results[nname] = pnr
    data["nodes"] = node_results

    # instance data
    instance_data = {}
    for iinfo in i_list:
      nic_data = [{"mac": n.mac, "ip": n.ip, "bridge": n.bridge}
                  for n in iinfo.nics]
      pir = {
        "tags": list(iinfo.GetTags()),
        "should_run": iinfo.status == "up",
        "vcpus": iinfo.vcpus,
        "memory": iinfo.memory,
        "os": iinfo.os,
        "nodes": [iinfo.primary_node] + list(iinfo.secondary_nodes),
        "nics": nic_data,
        "disks": [{"size": dsk.size, "mode": "w"} for dsk in iinfo.disks],
        "disk_template": iinfo.disk_template,
        }
      instance_data[iinfo.name] = pir

    data["instances"] = instance_data

    self.in_data = data
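  # For reference, a rough sketch of the structure built above (all
  # names and values are invented examples, "..." marks elided keys):
  #
  #   {"version": 1,
  #    "cluster_name": "cluster.example.com",
  #    "cluster_tags": [],
  #    "hypervisor_type": "xen-3.0",
  #    "nodes": {"node1.example.com": {"total_memory": 4096, ...}},
  #    "instances": {"inst1.example.com": {"memory": 512, ...}}}
  #
  # A "request" key describing the actual allocation or relocation is
  # added later by _AddNewInstance or _AddRelocateInstance.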

  def _AddNewInstance(self):
    """Add new instance data to allocator structure.

    This in combination with _ComputeClusterData will create the
    correct structure needed as input for the allocator.

    The checks for the completeness of the opcode must have already been
    done.

    """
    data = self.in_data
    if len(self.disks) != 2:
      raise errors.OpExecError("Only two-disk configurations supported")

    disk_space = _ComputeDiskSize(self.disk_template,
                                  self.disks[0]["size"], self.disks[1]["size"])

    if self.disk_template in constants.DTS_NET_MIRROR:
      self.required_nodes = 2
    else:
      self.required_nodes = 1
    request = {
      "type": "allocate",
      "name": self.name,
      "disk_template": self.disk_template,
      "tags": self.tags,
      "os": self.os,
      "vcpus": self.vcpus,
      "memory": self.mem_size,
      "disks": self.disks,
      "disk_space_total": disk_space,
      "nics": self.nics,
      "required_nodes": self.required_nodes,
      }
    data["request"] = request

  def _AddRelocateInstance(self):
    """Add relocate instance data to allocator structure.

    This in combination with _ComputeClusterData will create the
    correct structure needed as input for the allocator.

    The checks for the completeness of the opcode must have already been
    done.

    """
    instance = self.cfg.GetInstanceInfo(self.name)
    if instance is None:
      raise errors.ProgrammerError("Unknown instance '%s' passed to"
                                   " IAllocator" % self.name)

    if instance.disk_template not in constants.DTS_NET_MIRROR:
      raise errors.OpPrereqError("Can't relocate non-mirrored instances")

    if len(instance.secondary_nodes) != 1:
      raise errors.OpPrereqError("Instance does not have exactly one"
                                 " secondary node")

    self.required_nodes = 1

    disk_space = _ComputeDiskSize(instance.disk_template,
                                  instance.disks[0].size,
                                  instance.disks[1].size)

    request = {
      "type": "relocate",
      "name": self.name,
      "disk_space_total": disk_space,
      "required_nodes": self.required_nodes,
      "relocate_from": self.relocate_from,
      }
    self.in_data["request"] = request

  def _BuildInputData(self):
    """Build input data structures.

    """
    self._ComputeClusterData()

    if self.mode == constants.IALLOCATOR_MODE_ALLOC:
      self._AddNewInstance()
    else:
      self._AddRelocateInstance()

    self.in_text = serializer.Dump(self.in_data)
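  # in_text is the serialized form of in_data; Run() below ships it to
  # the master node, where the external allocator script reads it as
  # its input.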

  def Run(self, name, validate=True, call_fn=rpc.call_iallocator_runner):
    """Run an instance allocator and store the results.

    """
    result = call_fn(self.sstore.GetMasterNode(), name, self.in_text)

    if not isinstance(result, tuple) or len(result) != 4:
      raise errors.OpExecError("Invalid result from master iallocator runner")

    rcode, stdout, stderr, fail = result

    if rcode == constants.IARUN_NOTFOUND:
      raise errors.OpExecError("Can't find allocator '%s'" % name)
    elif rcode == constants.IARUN_FAILURE:
      raise errors.OpExecError("Instance allocator call failed: %s,"
                               " output: %s" %
                               (fail, stdout+stderr))
    self.out_text = stdout
    if validate:
      self._ValidateResult()

  def _ValidateResult(self):
    """Process the allocator results.

    This will process and, if successful, save the result in
    self.out_data and in the result attributes (success, info, nodes).

    """
    try:
      rdict = serializer.Load(self.out_text)
    except Exception, err:
      raise errors.OpExecError("Can't parse iallocator results: %s" % str(err))

    if not isinstance(rdict, dict):
      raise errors.OpExecError("Can't parse iallocator results: not a dict")

    for key in "success", "info", "nodes":
      if key not in rdict:
        raise errors.OpExecError("Can't parse iallocator results:"
                                 " missing key '%s'" % key)
      setattr(self, key, rdict[key])

    if not isinstance(rdict["nodes"], list):
      raise errors.OpExecError("Can't parse iallocator results: 'nodes' key"
                               " is not a list")
    self.out_data = rdict
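  # The external allocator is expected to emit a serialized dictionary
  # containing at least the three keys checked above, for example
  # (invented values):
  #
  #   {"success": True, "info": "allocation successful",
  #    "nodes": ["node2.example.com", "node3.example.com"]}
  #
  # After validation these become self.success, self.info and self.nodes.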


class LUTestAllocator(NoHooksLU):
  """Run allocator tests.

  This LU runs the allocator tests.

  """
  _OP_REQP = ["direction", "mode", "name"]

  def CheckPrereq(self):
    """Check prerequisites.

    This checks the opcode parameters depending on the direction and
    mode of the test.

    """
    if self.op.mode == constants.IALLOCATOR_MODE_ALLOC:
      for attr in ["name", "mem_size", "disks", "disk_template",
                   "os", "tags", "nics", "vcpus"]:
        if not hasattr(self.op, attr):
          raise errors.OpPrereqError("Missing attribute '%s' on opcode input" %
                                     attr)
      iname = self.cfg.ExpandInstanceName(self.op.name)
      if iname is not None:
        raise errors.OpPrereqError("Instance '%s' already in the cluster" %
                                   iname)
      if not isinstance(self.op.nics, list):
        raise errors.OpPrereqError("Invalid parameter 'nics'")
      for row in self.op.nics:
        if (not isinstance(row, dict) or
            "mac" not in row or
            "ip" not in row or
            "bridge" not in row):
          raise errors.OpPrereqError("Invalid contents of the"
                                     " 'nics' parameter")
      if not isinstance(self.op.disks, list):
        raise errors.OpPrereqError("Invalid parameter 'disks'")
      if len(self.op.disks) != 2:
        raise errors.OpPrereqError("Only two-disk configurations supported")
      for row in self.op.disks:
        if (not isinstance(row, dict) or
            "size" not in row or
            not isinstance(row["size"], int) or
            "mode" not in row or
            row["mode"] not in ['r', 'w']):
          raise errors.OpPrereqError("Invalid contents of the"
                                     " 'disks' parameter")
    elif self.op.mode == constants.IALLOCATOR_MODE_RELOC:
      if not hasattr(self.op, "name"):
        raise errors.OpPrereqError("Missing attribute 'name' on opcode input")
      fname = self.cfg.ExpandInstanceName(self.op.name)
      if fname is None:
        raise errors.OpPrereqError("Instance '%s' not found for relocation" %
                                   self.op.name)
      self.op.name = fname
      self.relocate_from = self.cfg.GetInstanceInfo(fname).secondary_nodes
    else:
      raise errors.OpPrereqError("Invalid test allocator mode '%s'" %
                                 self.op.mode)

    if self.op.direction == constants.IALLOCATOR_DIR_OUT:
      if not hasattr(self.op, "allocator") or self.op.allocator is None:
        raise errors.OpPrereqError("Missing allocator name")
    elif self.op.direction != constants.IALLOCATOR_DIR_IN:
      raise errors.OpPrereqError("Wrong allocator test '%s'" %
                                 self.op.direction)

  def Exec(self, feedback_fn):
    """Run the allocator test.

    """
    if self.op.mode == constants.IALLOCATOR_MODE_ALLOC:
      ial = IAllocator(self.cfg, self.sstore,
                       mode=self.op.mode,
                       name=self.op.name,
                       mem_size=self.op.mem_size,
                       disks=self.op.disks,
                       disk_template=self.op.disk_template,
                       os=self.op.os,
                       tags=self.op.tags,
                       nics=self.op.nics,
                       vcpus=self.op.vcpus,
                       )
    else:
      ial = IAllocator(self.cfg, self.sstore,
                       mode=self.op.mode,
                       name=self.op.name,
                       relocate_from=list(self.relocate_from),
                       )

    if self.op.direction == constants.IALLOCATOR_DIR_IN:
      result = ial.in_text
    else:
      ial.Run(self.op.allocator, validate=False)
      result = ial.out_text
    return result