Statistics
| Branch: | Tag: | Revision:

root / lib / cmdlib.py @ 17dfc522

History | View | Annotate | Download (151.6 kB)

1 2f31098c Iustin Pop
#
2 a8083063 Iustin Pop
#
3 a8083063 Iustin Pop
4 a8083063 Iustin Pop
# Copyright (C) 2006, 2007 Google Inc.
5 a8083063 Iustin Pop
#
6 a8083063 Iustin Pop
# This program is free software; you can redistribute it and/or modify
7 a8083063 Iustin Pop
# it under the terms of the GNU General Public License as published by
8 a8083063 Iustin Pop
# the Free Software Foundation; either version 2 of the License, or
9 a8083063 Iustin Pop
# (at your option) any later version.
10 a8083063 Iustin Pop
#
11 a8083063 Iustin Pop
# This program is distributed in the hope that it will be useful, but
12 a8083063 Iustin Pop
# WITHOUT ANY WARRANTY; without even the implied warranty of
13 a8083063 Iustin Pop
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
14 a8083063 Iustin Pop
# General Public License for more details.
15 a8083063 Iustin Pop
#
16 a8083063 Iustin Pop
# You should have received a copy of the GNU General Public License
17 a8083063 Iustin Pop
# along with this program; if not, write to the Free Software
18 a8083063 Iustin Pop
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
19 a8083063 Iustin Pop
# 02110-1301, USA.
20 a8083063 Iustin Pop
21 a8083063 Iustin Pop
22 880478f8 Iustin Pop
"""Module implementing the master-side code."""
23 a8083063 Iustin Pop
24 a8083063 Iustin Pop
# pylint: disable-msg=W0613,W0201
25 a8083063 Iustin Pop
26 a8083063 Iustin Pop
import os
27 a8083063 Iustin Pop
import os.path
28 a8083063 Iustin Pop
import sha
29 a8083063 Iustin Pop
import time
30 a8083063 Iustin Pop
import tempfile
31 a8083063 Iustin Pop
import re
32 a8083063 Iustin Pop
import platform
33 a8083063 Iustin Pop
34 a8083063 Iustin Pop
from ganeti import rpc
35 a8083063 Iustin Pop
from ganeti import ssh
36 a8083063 Iustin Pop
from ganeti import logger
37 a8083063 Iustin Pop
from ganeti import utils
38 a8083063 Iustin Pop
from ganeti import errors
39 a8083063 Iustin Pop
from ganeti import hypervisor
40 a8083063 Iustin Pop
from ganeti import config
41 a8083063 Iustin Pop
from ganeti import constants
42 a8083063 Iustin Pop
from ganeti import objects
43 a8083063 Iustin Pop
from ganeti import opcodes
44 a8083063 Iustin Pop
from ganeti import ssconf
45 a8083063 Iustin Pop
46 7c0d6283 Michael Hanselmann
47 a8083063 Iustin Pop
class LogicalUnit(object):
  """Logical Unit base class.

  Subclasses must follow these rules:
    - implement CheckPrereq which also fills in the opcode instance
      with all the fields (even if as None)
    - implement Exec
    - implement BuildHooksEnv
    - redefine HPATH and HTYPE
    - optionally redefine their run requirements (REQ_CLUSTER,
      REQ_MASTER); note that all commands require root permissions

  """
  HPATH = None        # hooks path; None means BuildHooksEnv is never called
  HTYPE = None        # hooks type, presumably one of constants.HTYPE_* -- confirm
  _OP_REQP = []       # opcode attributes that must be present and not None
  REQ_CLUSTER = True  # whether an initialized cluster is required to run
  REQ_MASTER = True   # whether this LU must run on the master node

  def __init__(self, processor, op, cfg, sstore):
    """Constructor for LogicalUnit.

    This needs to be overriden in derived classes in order to check op
    validity.

    Args:
      processor: the processor driving this LU (stored as self.proc)
      op: the opcode to be executed
      cfg: the cluster configuration writer
      sstore: the cluster simple store

    Raises:
      errors.OpPrereqError: if a parameter listed in _OP_REQP is
        missing from the opcode, if REQ_CLUSTER is set but the cluster
        is not initialized, or if REQ_MASTER is set but we are not
        running on the master node

    """
    self.proc = processor
    self.op = op
    self.cfg = cfg
    self.sstore = sstore
    # SshRunner is created lazily on first access (see the 'ssh' property)
    self.__ssh = None

    # validate that all declared required opcode parameters are present
    for attr_name in self._OP_REQP:
      attr_val = getattr(op, attr_name, None)
      if attr_val is None:
        raise errors.OpPrereqError("Required parameter '%s' missing" %
                                   attr_name)
    if self.REQ_CLUSTER:
      if not cfg.IsCluster():
        raise errors.OpPrereqError("Cluster not initialized yet,"
                                   " use 'gnt-cluster init' first.")
      if self.REQ_MASTER:
        master = sstore.GetMasterNode()
        # compare against our own resolved hostname
        if master != utils.HostInfo().name:
          raise errors.OpPrereqError("Commands must be run on the master"
                                     " node %s" % master)

  def __GetSSH(self):
    """Returns the SshRunner object

    The runner is built on first use and cached for later calls.

    """
    if not self.__ssh:
      self.__ssh = ssh.SshRunner(self.sstore)
    return self.__ssh

  # read-only accessor so LUs can simply use self.ssh
  ssh = property(fget=__GetSSH)

  def CheckPrereq(self):
    """Check prerequisites for this LU.

    This method should check that the prerequisites for the execution
    of this LU are fulfilled. It can do internode communication, but
    it should be idempotent - no cluster or system changes are
    allowed.

    The method should raise errors.OpPrereqError in case something is
    not fulfilled. Its return value is ignored.

    This method should also update all the parameters of the opcode to
    their canonical form; e.g. a short node name must be fully
    expanded after this method has successfully completed (so that
    hooks, logging, etc. work correctly).

    """
    raise NotImplementedError

  def Exec(self, feedback_fn):
    """Execute the LU.

    This method should implement the actual work. It should raise
    errors.OpExecError for failures that are somewhat dealt with in
    code, or expected.

    """
    raise NotImplementedError

  def BuildHooksEnv(self):
    """Build hooks environment for this LU.

    This method should return a three-node tuple consisting of: a dict
    containing the environment that will be used for running the
    specific hook for this LU, a list of node names on which the hook
    should run before the execution, and a list of node names on which
    the hook should run after the execution.

    The keys of the dict must not have 'GANETI_' prefixed as this will
    be handled in the hooks runner. Also note additional keys will be
    added by the hooks runner. If the LU doesn't define any
    environment, an empty dict (and not None) should be returned.

    As for the node lists, the master should not be included in the
    them, as it will be added by the hooks runner in case this LU
    requires a cluster to run on (otherwise we don't have a node
    list). No nodes should be returned as an empty list (and not
    None).

    Note that if the HPATH for a LU class is None, this function will
    not be called.

    """
    raise NotImplementedError
158 a8083063 Iustin Pop
159 a8083063 Iustin Pop
160 a8083063 Iustin Pop
class NoHooksLU(LogicalUnit):
  """Simple LU which runs no hooks.

  This LU is intended as a parent for other LogicalUnits which will
  run no hooks, in order to reduce duplicate code.

  """
  HPATH = None
  HTYPE = None

  def BuildHooksEnv(self):
    """Build hooks env.

    This is a no-op, since we don't run hooks.

    """
    empty_env = {}
    pre_nodes = []
    post_nodes = []
    return empty_env, pre_nodes, post_nodes
177 a8083063 Iustin Pop
178 a8083063 Iustin Pop
179 9440aeab Michael Hanselmann
def _AddHostToEtcHosts(hostname):
  """Ensure an /etc/hosts entry exists for the given host.

  Thin wrapper around utils.SetEtcHostsEntry: the host is resolved via
  utils.HostInfo and registered under its full name plus its short
  name as an alias.

  """
  host_info = utils.HostInfo(name=hostname)
  aliases = [host_info.ShortName()]
  utils.SetEtcHostsEntry(constants.ETC_HOSTS, host_info.ip, host_info.name,
                         aliases)
185 9440aeab Michael Hanselmann
186 9440aeab Michael Hanselmann
187 c8a0948f Michael Hanselmann
def _RemoveHostFromEtcHosts(hostname):
  """Drop the /etc/hosts entries for the given host.

  Thin wrapper around utils.RemoveEtcHostsEntry: both the fully
  qualified name and the short name of the host are removed.

  """
  host_info = utils.HostInfo(name=hostname)
  for entry in (host_info.name, host_info.ShortName()):
    utils.RemoveEtcHostsEntry(constants.ETC_HOSTS, entry)
194 c8a0948f Michael Hanselmann
195 c8a0948f Michael Hanselmann
196 dcb93971 Michael Hanselmann
def _GetWantedNodes(lu, nodes):
  """Returns list of checked and expanded node names.

  Args:
    nodes: List of nodes (strings) or None for all

  Raises errors.OpPrereqError if the argument is not a list or if a
  named node does not exist.

  """
  if not isinstance(nodes, list):
    raise errors.OpPrereqError("Invalid argument type 'nodes'")

  if not nodes:
    # no explicit selection: operate on every configured node
    return utils.NiceSort(lu.cfg.GetNodeList())

  expanded = []
  for short_name in nodes:
    full_name = lu.cfg.ExpandNodeName(short_name)
    if full_name is None:
      raise errors.OpPrereqError("No such node name '%s'" % short_name)
    expanded.append(full_name)
  return utils.NiceSort(expanded)
218 3312b702 Iustin Pop
219 3312b702 Iustin Pop
220 3312b702 Iustin Pop
def _GetWantedInstances(lu, instances):
  """Returns list of checked and expanded instance names.

  Args:
    instances: List of instances (strings) or None for all

  Raises errors.OpPrereqError if the argument is not a list or if a
  named instance does not exist.

  """
  if not isinstance(instances, list):
    raise errors.OpPrereqError("Invalid argument type 'instances'")

  if not instances:
    # no explicit selection: operate on every configured instance
    return utils.NiceSort(lu.cfg.GetInstanceList())

  expanded = []
  for short_name in instances:
    full_name = lu.cfg.ExpandInstanceName(short_name)
    if full_name is None:
      raise errors.OpPrereqError("No such instance name '%s'" % short_name)
    expanded.append(full_name)
  return utils.NiceSort(expanded)
242 dcb93971 Michael Hanselmann
243 dcb93971 Michael Hanselmann
244 dcb93971 Michael Hanselmann
def _CheckOutputFields(static, dynamic, selected):
  """Checks whether all selected fields are valid.

  Args:
    static: Static fields
    dynamic: Dynamic fields

  Raises errors.OpPrereqError naming the offending fields if any
  selected field is neither static nor dynamic.

  """
  all_fields = frozenset(static) | frozenset(dynamic)

  unknown = frozenset(selected).difference(all_fields)
  if unknown:
    raise errors.OpPrereqError("Unknown output fields selected: %s"
                               % ",".join(unknown))
261 dcb93971 Michael Hanselmann
262 dcb93971 Michael Hanselmann
263 ecb215b5 Michael Hanselmann
def _BuildInstanceHookEnv(name, primary_node, secondary_nodes, os_type, status,
264 396e1b78 Michael Hanselmann
                          memory, vcpus, nics):
265 ecb215b5 Michael Hanselmann
  """Builds instance related env variables for hooks from single variables.
266 ecb215b5 Michael Hanselmann

267 ecb215b5 Michael Hanselmann
  Args:
268 ecb215b5 Michael Hanselmann
    secondary_nodes: List of secondary nodes as strings
269 396e1b78 Michael Hanselmann
  """
270 396e1b78 Michael Hanselmann
  env = {
271 0e137c28 Iustin Pop
    "OP_TARGET": name,
272 396e1b78 Michael Hanselmann
    "INSTANCE_NAME": name,
273 396e1b78 Michael Hanselmann
    "INSTANCE_PRIMARY": primary_node,
274 396e1b78 Michael Hanselmann
    "INSTANCE_SECONDARIES": " ".join(secondary_nodes),
275 ecb215b5 Michael Hanselmann
    "INSTANCE_OS_TYPE": os_type,
276 396e1b78 Michael Hanselmann
    "INSTANCE_STATUS": status,
277 396e1b78 Michael Hanselmann
    "INSTANCE_MEMORY": memory,
278 396e1b78 Michael Hanselmann
    "INSTANCE_VCPUS": vcpus,
279 396e1b78 Michael Hanselmann
  }
280 396e1b78 Michael Hanselmann
281 396e1b78 Michael Hanselmann
  if nics:
282 396e1b78 Michael Hanselmann
    nic_count = len(nics)
283 53e4e875 Guido Trotter
    for idx, (ip, bridge, mac) in enumerate(nics):
284 396e1b78 Michael Hanselmann
      if ip is None:
285 396e1b78 Michael Hanselmann
        ip = ""
286 396e1b78 Michael Hanselmann
      env["INSTANCE_NIC%d_IP" % idx] = ip
287 396e1b78 Michael Hanselmann
      env["INSTANCE_NIC%d_BRIDGE" % idx] = bridge
288 53e4e875 Guido Trotter
      env["INSTANCE_NIC%d_HWADDR" % idx] = mac
289 396e1b78 Michael Hanselmann
  else:
290 396e1b78 Michael Hanselmann
    nic_count = 0
291 396e1b78 Michael Hanselmann
292 396e1b78 Michael Hanselmann
  env["INSTANCE_NIC_COUNT"] = nic_count
293 396e1b78 Michael Hanselmann
294 396e1b78 Michael Hanselmann
  return env
295 396e1b78 Michael Hanselmann
296 396e1b78 Michael Hanselmann
297 396e1b78 Michael Hanselmann
def _BuildInstanceHookEnvByObject(instance, override=None):
  """Builds instance related env variables for hooks from an object.

  Args:
    instance: objects.Instance object of instance
    override: dict of values to override

  Returns the environment dict produced by _BuildInstanceHookEnv.

  """
  args = {
    'name': instance.name,
    'primary_node': instance.primary_node,
    'secondary_nodes': instance.secondary_nodes,
    'os_type': instance.os,
    # FIX: this was 'instance.os' (copy-paste from the line above), which
    # made the INSTANCE_STATUS hook variable carry the OS name instead of
    # the instance's run status
    'status': instance.status,
    'memory': instance.memory,
    'vcpus': instance.vcpus,
    'nics': [(nic.ip, nic.bridge, nic.mac) for nic in instance.nics],
  }
  if override:
    args.update(override)
  return _BuildInstanceHookEnv(**args)
317 396e1b78 Michael Hanselmann
318 396e1b78 Michael Hanselmann
319 a8083063 Iustin Pop
def _HasValidVG(vglist, vgname):
320 a8083063 Iustin Pop
  """Checks if the volume group list is valid.
321 a8083063 Iustin Pop

322 a8083063 Iustin Pop
  A non-None return value means there's an error, and the return value
323 a8083063 Iustin Pop
  is the error message.
324 a8083063 Iustin Pop

325 a8083063 Iustin Pop
  """
326 a8083063 Iustin Pop
  vgsize = vglist.get(vgname, None)
327 a8083063 Iustin Pop
  if vgsize is None:
328 a8083063 Iustin Pop
    return "volume group '%s' missing" % vgname
329 a8083063 Iustin Pop
  elif vgsize < 20480:
330 191a8385 Guido Trotter
    return ("volume group '%s' too small (20480MiB required, %dMib found)" %
331 191a8385 Guido Trotter
            (vgname, vgsize))
332 a8083063 Iustin Pop
  return None
333 a8083063 Iustin Pop
334 a8083063 Iustin Pop
335 a8083063 Iustin Pop
def _InitSSHSetup(node):
  """Setup the SSH configuration for the cluster.

  This generates a dsa keypair for root, adds the pub key to the
  permitted hosts and adds the hostkey to its own known hosts.

  Args:
    node: the name of this host as a fqdn

  Raises errors.OpExecError if the keypair cannot be generated.

  """
  priv_key, pub_key, auth_keys = ssh.GetUserFiles(constants.GANETI_RUNAS)

  # back up and clear any pre-existing key material so ssh-keygen can
  # write a fresh pair
  for key_path in (priv_key, pub_key):
    if os.path.exists(key_path):
      utils.CreateBackup(key_path)
    utils.RemoveFile(key_path)

  result = utils.RunCmd(["ssh-keygen", "-t", "dsa",
                         "-f", priv_key,
                         "-q", "-N", ""])
  if result.failed:
    raise errors.OpExecError("Could not generate ssh keypair, error %s" %
                             result.output)

  pub_fd = open(pub_key, 'r')
  try:
    # register our own public key as authorized for root logins
    utils.AddAuthorizedKey(auth_keys, pub_fd.read(8192))
  finally:
    pub_fd.close()
365 a8083063 Iustin Pop
366 a8083063 Iustin Pop
367 a8083063 Iustin Pop
def _InitGanetiServerSetup(ss):
  """Setup the necessary configuration for the initial node daemon.

  This creates the nodepass file containing the shared password for
  the cluster and also generates the SSL certificate.

  Args:
    ss: the cluster simple store in which the shared password is saved

  Raises:
    errors.OpExecError: if the SSL certificate cannot be generated or
      the node daemon fails to restart

  """
  # Create pseudo random password
  randpass = sha.new(os.urandom(64)).hexdigest()
  # and write it into sstore
  ss.SetKey(ss.SS_NODED_PASS, randpass)

  # generate a self-signed certificate valid for five years; key and
  # cert are written into the same file
  result = utils.RunCmd(["openssl", "req", "-new", "-newkey", "rsa:1024",
                         "-days", str(365*5), "-nodes", "-x509",
                         "-keyout", constants.SSL_CERT_FILE,
                         "-out", constants.SSL_CERT_FILE, "-batch"])
  if result.failed:
    raise errors.OpExecError("could not generate server ssl cert, command"
                             " %s had exitcode %s and error message %s" %
                             (result.cmd, result.exit_code, result.output))

  # the file also contains the private key, so keep it root-readable only
  os.chmod(constants.SSL_CERT_FILE, 0400)

  # restart noded so it picks up the new password and certificate
  result = utils.RunCmd([constants.NODE_INITD_SCRIPT, "restart"])

  if result.failed:
    raise errors.OpExecError("Could not start the node daemon, command %s"
                             " had exitcode %s and error %s" %
                             (result.cmd, result.exit_code, result.output))
396 a8083063 Iustin Pop
397 a8083063 Iustin Pop
398 bf6929a2 Alexander Schreiber
def _CheckInstanceBridgesExist(instance):
  """Check that the bridges needed by an instance exist.

  Raises errors.OpPrereqError if any bridge used by the instance's
  NICs is missing on its primary node.

  """
  # collect every bridge referenced by the instance's NICs
  required_bridges = []
  for nic in instance.nics:
    required_bridges.append(nic.bridge)
  if not rpc.call_bridges_exist(instance.primary_node, required_bridges):
    raise errors.OpPrereqError("one or more target bridges %s does not"
                               " exist on destination node '%s'" %
                               (required_bridges, instance.primary_node))
408 bf6929a2 Alexander Schreiber
409 bf6929a2 Alexander Schreiber
410 a8083063 Iustin Pop
class LUInitCluster(LogicalUnit):
  """Initialise the cluster.

  This LU validates the parameters for a new cluster (CheckPrereq) and
  then bootstraps it: simple store, node daemon credentials, master IP,
  ssh keys, /etc/hosts and the initial configuration file (Exec).

  """
  HPATH = "cluster-init"
  HTYPE = constants.HTYPE_CLUSTER
  _OP_REQP = ["cluster_name", "hypervisor_type", "vg_name", "mac_prefix",
              "def_bridge", "master_netdev"]
  REQ_CLUSTER = False

  def BuildHooksEnv(self):
    """Build hooks env.

    Notes: Since we don't require a cluster, we must manually add
    ourselves in the post-run node list.

    """
    env = {"OP_TARGET": self.op.cluster_name}
    return env, [], [self.hostname.name]

  def CheckPrereq(self):
    """Verify that the passed name is a valid one.

    Also validates all other cluster parameters (IPs, volume group,
    mac prefix, hypervisor, master netdev, init script) and stores the
    resolved host/cluster info on self for use by Exec.

    """
    if config.ConfigWriter.IsCluster():
      raise errors.OpPrereqError("Cluster is already initialised")

    if self.op.hypervisor_type == constants.HT_XEN_HVM31:
      if not os.path.exists(constants.VNC_PASSWORD_FILE):
        # FIX: the two message fragments concatenated to "VNCpassword";
        # added the missing space
        raise errors.OpPrereqError("Please prepare the cluster VNC"
                                   " password file %s" %
                                   constants.VNC_PASSWORD_FILE)

    self.hostname = hostname = utils.HostInfo()

    if hostname.ip.startswith("127."):
      raise errors.OpPrereqError("This host's IP resolves to the private"
                                 " range (%s). Please fix DNS or %s." %
                                 (hostname.ip, constants.ETC_HOSTS))

    # our own resolved IP must actually be configured on this host
    if not utils.TcpPing(hostname.ip, constants.DEFAULT_NODED_PORT,
                         source=constants.LOCALHOST_IP_ADDRESS):
      raise errors.OpPrereqError("Inconsistency: this host's name resolves"
                                 " to %s,\nbut this ip address does not"
                                 " belong to this host."
                                 " Aborting." % hostname.ip)

    self.clustername = clustername = utils.HostInfo(self.op.cluster_name)

    # the cluster IP must not be in use yet
    if utils.TcpPing(clustername.ip, constants.DEFAULT_NODED_PORT,
                     timeout=5):
      raise errors.OpPrereqError("Cluster IP already active. Aborting.")

    secondary_ip = getattr(self.op, "secondary_ip", None)
    if secondary_ip and not utils.IsValidIP(secondary_ip):
      raise errors.OpPrereqError("Invalid secondary ip given")
    if (secondary_ip and
        secondary_ip != hostname.ip and
        (not utils.TcpPing(secondary_ip, constants.DEFAULT_NODED_PORT,
                           source=constants.LOCALHOST_IP_ADDRESS))):
      raise errors.OpPrereqError("You gave %s as secondary IP,"
                                 " but it does not belong to this host." %
                                 secondary_ip)
    self.secondary_ip = secondary_ip

    # checks presence of the volume group given
    vgstatus = _HasValidVG(utils.ListVolumeGroups(), self.op.vg_name)

    if vgstatus:
      raise errors.OpPrereqError("Error: %s" % vgstatus)

    if not re.match("^[0-9a-z]{2}:[0-9a-z]{2}:[0-9a-z]{2}$",
                    self.op.mac_prefix):
      raise errors.OpPrereqError("Invalid mac prefix given '%s'" %
                                 self.op.mac_prefix)

    if self.op.hypervisor_type not in constants.HYPER_TYPES:
      raise errors.OpPrereqError("Invalid hypervisor type given '%s'" %
                                 self.op.hypervisor_type)

    result = utils.RunCmd(["ip", "link", "show", "dev", self.op.master_netdev])
    if result.failed:
      raise errors.OpPrereqError("Invalid master netdev given (%s): '%s'" %
                                 (self.op.master_netdev,
                                  result.output.strip()))

    if not (os.path.isfile(constants.NODE_INITD_SCRIPT) and
            os.access(constants.NODE_INITD_SCRIPT, os.X_OK)):
      raise errors.OpPrereqError("Init.d script '%s' missing or not"
                                 " executable." % constants.NODE_INITD_SCRIPT)

  def Exec(self, feedback_fn):
    """Initialize the cluster.

    """
    clustername = self.clustername
    hostname = self.hostname

    # set up the simple store
    self.sstore = ss = ssconf.SimpleStore()
    ss.SetKey(ss.SS_HYPERVISOR, self.op.hypervisor_type)
    ss.SetKey(ss.SS_MASTER_NODE, hostname.name)
    ss.SetKey(ss.SS_MASTER_IP, clustername.ip)
    ss.SetKey(ss.SS_MASTER_NETDEV, self.op.master_netdev)
    ss.SetKey(ss.SS_CLUSTER_NAME, clustername.name)

    # set up the inter-node password and certificate
    _InitGanetiServerSetup(ss)

    # start the master ip
    rpc.call_node_start_master(hostname.name)

    # set up ssh config and /etc/hosts
    f = open(constants.SSH_HOST_RSA_PUB, 'r')
    try:
      sshline = f.read()
    finally:
      f.close()
    # second field of the host key line is the key itself
    sshkey = sshline.split(" ")[1]

    _AddHostToEtcHosts(hostname.name)
    _InitSSHSetup(hostname.name)

    # init of cluster config file
    self.cfg = cfgw = config.ConfigWriter()
    cfgw.InitConfig(hostname.name, hostname.ip, self.secondary_ip,
                    sshkey, self.op.mac_prefix,
                    self.op.vg_name, self.op.def_bridge)

    ssh.WriteKnownHostsFile(cfgw, ss, constants.SSH_KNOWN_HOSTS_FILE)
540 f408b346 Michael Hanselmann
541 a8083063 Iustin Pop
542 a8083063 Iustin Pop
class LUDestroyCluster(NoHooksLU):
  """Logical unit for destroying the cluster.

  """
  _OP_REQP = []

  def CheckPrereq(self):
    """Check prerequisites.

    This checks whether the cluster is empty.

    Any errors are signalled by raising errors.OpPrereqError.

    """
    master = self.sstore.GetMasterNode()

    nodes = self.cfg.GetNodeList()
    # the only node left must be the master itself
    if nodes != [master]:
      raise errors.OpPrereqError("There are still %d node(s) in"
                                 " this cluster." % (len(nodes) - 1))

    instances = self.cfg.GetInstanceList()
    if instances:
      raise errors.OpPrereqError("There are still %d instance(s) in"
                                 " this cluster." % len(instances))

  def Exec(self, feedback_fn):
    """Destroys the cluster.

    """
    master = self.sstore.GetMasterNode()

    # take down the master role (and master IP) first
    if not rpc.call_node_stop_master(master):
      raise errors.OpExecError("Could not disable the master role")

    # preserve copies of the root ssh keys we are about to abandon
    priv_key, pub_key, _ = ssh.GetUserFiles(constants.GANETI_RUNAS)
    for key_file in (priv_key, pub_key):
      utils.CreateBackup(key_file)

    rpc.call_node_leave_cluster(master)
578 a8083063 Iustin Pop
579 a8083063 Iustin Pop
580 a8083063 Iustin Pop
class LUVerifyCluster(NoHooksLU):
  """Verifies the cluster status.

  Runs a series of consistency checks (software versions, volume
  groups, configuration file checksums, inter-node connectivity,
  instance placement and volumes) and reports problems through the
  feedback function.

  """
  _OP_REQP = []

  def _VerifyNode(self, node, file_list, local_cksum, vglist, node_result,
                  remote_version, feedback_fn):
    """Run multiple tests against a node.

    Test list:
      - compares ganeti version
      - checks vg existance and size > 20G
      - checks config file checksum
      - checks ssh to other nodes

    Args:
      node: name of the node to check
      file_list: required list of files
      local_cksum: dictionary of local files and their checksums
      vglist: volume group data returned by the node
      node_result: dict of results of the node's verify RPC call
      remote_version: protocol version reported by the node
      feedback_fn: callable used to report errors

    Returns:
      True if any check failed, False otherwise.

    """
    # compares ganeti version
    local_version = constants.PROTOCOL_VERSION
    if not remote_version:
      # no version data means we could not talk to the node at all;
      # skip the remaining per-node checks
      feedback_fn(" - ERROR: connection to %s failed" % (node))
      return True

    if local_version != remote_version:
      feedback_fn("  - ERROR: sw version mismatch: master %s, node(%s) %s" %
                      (local_version, node, remote_version))
      return True

    # checks vg existance and size > 20G

    bad = False
    if not vglist:
      feedback_fn("  - ERROR: unable to check volume groups on node %s." %
                      (node,))
      bad = True
    else:
      # _HasValidVG returns an error message, or a false value when OK
      vgstatus = _HasValidVG(vglist, self.cfg.GetVGName())
      if vgstatus:
        feedback_fn("  - ERROR: %s on node %s" % (vgstatus, node))
        bad = True

    # checks config file checksum
    # checks ssh to any

    if 'filelist' not in node_result:
      bad = True
      feedback_fn("  - ERROR: node hasn't returned file checksum data")
    else:
      remote_cksum = node_result['filelist']
      for file_name in file_list:
        if file_name not in remote_cksum:
          bad = True
          feedback_fn("  - ERROR: file '%s' missing" % file_name)
        elif remote_cksum[file_name] != local_cksum[file_name]:
          bad = True
          feedback_fn("  - ERROR: file '%s' has wrong checksum" % file_name)

    if 'nodelist' not in node_result:
      bad = True
      feedback_fn("  - ERROR: node hasn't returned node connectivity data")
    else:
      if node_result['nodelist']:
        bad = True
        # NOTE: this loop rebinds 'node' (the method parameter) to the
        # names of the peers the tested node failed to reach
        for node in node_result['nodelist']:
          feedback_fn("  - ERROR: communication with node '%s': %s" %
                          (node, node_result['nodelist'][node]))
    hyp_result = node_result.get('hypervisor', None)
    if hyp_result is not None:
      # hypervisor failures are reported but do not set 'bad' here
      feedback_fn("  - ERROR: hypervisor verify failure: '%s'" % hyp_result)
    return bad

  def _VerifyInstance(self, instance, node_vol_is, node_instance, feedback_fn):
    """Verify an instance.

    This function checks to see if the required block devices are
    available on the instance's node.

    Args:
      instance: name of the instance to verify
      node_vol_is: dict of node name to list of volumes actually present
      node_instance: dict of node name to list of running instances
      feedback_fn: callable used to report errors

    Returns:
      True if any check failed, False otherwise.

    """
    bad = False

    instancelist = self.cfg.GetInstanceList()
    if not instance in instancelist:
      feedback_fn("  - ERROR: instance %s not in instance list %s" %
                      (instance, instancelist))
      bad = True

    instanceconfig = self.cfg.GetInstanceInfo(instance)
    node_current = instanceconfig.primary_node

    node_vol_should = {}
    instanceconfig.MapLVsByNode(node_vol_should)

    # every volume the configuration expects must be present on its node
    for node in node_vol_should:
      for volume in node_vol_should[node]:
        if node not in node_vol_is or volume not in node_vol_is[node]:
          feedback_fn("  - ERROR: volume %s missing on node %s" %
                          (volume, node))
          bad = True

    if not instanceconfig.status == 'down':
      # a non-down instance should actually be running on its primary
      if not instance in node_instance[node_current]:
        feedback_fn("  - ERROR: instance %s not running on node %s" %
                        (instance, node_current))
        bad = True

    # the instance must not be running anywhere but its primary node
    for node in node_instance:
      if (not node == node_current):
        if instance in node_instance[node]:
          feedback_fn("  - ERROR: instance %s should not run on node %s" %
                          (instance, node))
          bad = True

    return bad

  def _VerifyOrphanVolumes(self, node_vol_should, node_vol_is, feedback_fn):
    """Verify if there are any unknown volumes in the cluster.

    The .os, .swap and backup volumes are ignored. All other volumes are
    reported as unknown.

    Returns:
      True if any orphan volume was found, False otherwise.

    """
    bad = False

    for node in node_vol_is:
      for volume in node_vol_is[node]:
        if node not in node_vol_should or volume not in node_vol_should[node]:
          feedback_fn("  - ERROR: volume %s on node %s should not exist" %
                      (volume, node))
          bad = True
    return bad

  def _VerifyOrphanInstances(self, instancelist, node_instance, feedback_fn):
    """Verify the list of running instances.

    This checks what instances are running but unknown to the cluster.

    Returns:
      True if any orphan instance was found, False otherwise.

    """
    bad = False
    for node in node_instance:
      for runninginstance in node_instance[node]:
        if runninginstance not in instancelist:
          feedback_fn("  - ERROR: instance %s on node %s should not exist" %
                          (runninginstance, node))
          bad = True
    return bad

  def CheckPrereq(self):
    """Check prerequisites.

    This has no prerequisites.

    """
    pass

  def Exec(self, feedback_fn):
    """Verify integrity of cluster, performing various test on nodes.

    Returns int(bad): 0 if everything checked out, 1 if any error
    was found.

    """
    bad = False
    feedback_fn("* Verifying global settings")
    for msg in self.cfg.VerifyConfig():
      feedback_fn("  - ERROR: %s" % msg)

    vg_name = self.cfg.GetVGName()
    nodelist = utils.NiceSort(self.cfg.GetNodeList())
    instancelist = utils.NiceSort(self.cfg.GetInstanceList())
    node_volume = {}
    node_instance = {}

    # FIXME: verify OS list
    # do local checksums
    file_names = list(self.sstore.GetFileList())
    file_names.append(constants.SSL_CERT_FILE)
    file_names.append(constants.CLUSTER_CONF_FILE)
    local_checksums = utils.FingerprintFiles(file_names)

    # gather all remote data up front, then check node by node
    feedback_fn("* Gathering data (%d nodes)" % len(nodelist))
    all_volumeinfo = rpc.call_volume_list(nodelist, vg_name)
    all_instanceinfo = rpc.call_instance_list(nodelist)
    all_vglist = rpc.call_vg_list(nodelist)
    node_verify_param = {
      'filelist': file_names,
      'nodelist': nodelist,
      'hypervisor': None,
      }
    all_nvinfo = rpc.call_node_verify(nodelist, node_verify_param)
    all_rversion = rpc.call_version(nodelist)

    for node in nodelist:
      feedback_fn("* Verifying node %s" % node)
      result = self._VerifyNode(node, file_names, local_checksums,
                                all_vglist[node], all_nvinfo[node],
                                all_rversion[node], feedback_fn)
      bad = bad or result

      # node_volume
      volumeinfo = all_volumeinfo[node]

      if isinstance(volumeinfo, basestring):
        # a string result means the LVM commands failed on that node
        feedback_fn("  - ERROR: LVM problem on node %s: %s" %
                    (node, volumeinfo[-400:].encode('string_escape')))
        bad = True
        node_volume[node] = {}
      elif not isinstance(volumeinfo, dict):
        feedback_fn("  - ERROR: connection to %s failed" % (node,))
        bad = True
        continue
      else:
        node_volume[node] = volumeinfo

      # node_instance
      nodeinstance = all_instanceinfo[node]
      if type(nodeinstance) != list:
        feedback_fn("  - ERROR: connection to %s failed" % (node,))
        bad = True
        continue

      node_instance[node] = nodeinstance

    node_vol_should = {}

    for instance in instancelist:
      feedback_fn("* Verifying instance %s" % instance)
      result =  self._VerifyInstance(instance, node_volume, node_instance,
                                     feedback_fn)
      bad = bad or result

      inst_config = self.cfg.GetInstanceInfo(instance)

      # accumulate the per-node volume expectations of all instances
      inst_config.MapLVsByNode(node_vol_should)

    feedback_fn("* Verifying orphan volumes")
    result = self._VerifyOrphanVolumes(node_vol_should, node_volume,
                                       feedback_fn)
    bad = bad or result

    feedback_fn("* Verifying remaining instances")
    result = self._VerifyOrphanInstances(instancelist, node_instance,
                                         feedback_fn)
    bad = bad or result

    return int(bad)
828 a8083063 Iustin Pop
829 2c95a8d4 Iustin Pop
class LUVerifyDisks(NoHooksLU):
  """Verifies the cluster disks status.

  """
  _OP_REQP = []

  def CheckPrereq(self):
    """Check prerequisites.

    This has no prerequisites.

    """
    pass

  def Exec(self, feedback_fn):
    """Verify integrity of cluster disks.

    Returns:
      A four-element tuple:
        - list of nodes that could not be contacted (or returned
          invalid data)
        - dict of node name -> LVM error string, for nodes where
          listing the LVs failed
        - list of instance names having at least one offline LV
        - dict of instance name -> list of (node, volume) pairs for
          LVs that are missing entirely

    """
    # 'result' aliases the tuple of the four accumulators, so filling
    # them in below also fills in the returned value
    result = res_nodes, res_nlvm, res_instances, res_missing = [], {}, [], {}

    vg_name = self.cfg.GetVGName()
    nodes = utils.NiceSort(self.cfg.GetNodeList())
    instances = [self.cfg.GetInstanceInfo(name)
                 for name in self.cfg.GetInstanceList()]

    # only running, network-mirrored instances are of interest here
    nv_dict = {}
    for inst in instances:
      inst_lvs = {}
      if (inst.status != "up" or
          inst.disk_template not in constants.DTS_NET_MIRROR):
        continue
      inst.MapLVsByNode(inst_lvs)
      # transform { iname: {node: [vol,],},} to {(node, vol): iname}
      for node, vol_list in inst_lvs.iteritems():
        for vol in vol_list:
          nv_dict[(node, vol)] = inst

    if not nv_dict:
      # no relevant instances, nothing to check
      return result

    node_lvs = rpc.call_volume_list(nodes, vg_name)

    for node in nodes:
      # node_volume
      lvs = node_lvs[node]

      if isinstance(lvs, basestring):
        # a string result means the LV listing itself failed on the
        # node; record the error and skip the node, since iterating
        # over the string below would raise an AttributeError
        logger.Info("error enumerating LVs on node %s: %s" % (node, lvs))
        res_nlvm[node] = lvs
        continue
      elif not isinstance(lvs, dict):
        logger.Info("connection to node %s failed or invalid data returned" %
                    (node,))
        res_nodes.append(node)
        continue

      for lv_name, (_, lv_inactive, lv_online) in lvs.iteritems():
        # pop, so that whatever remains in nv_dict afterwards is the
        # set of LVs that exist in the config but not on any node
        inst = nv_dict.pop((node, lv_name), None)
        if (not lv_online and inst is not None
            and inst.name not in res_instances):
          res_instances.append(inst.name)

    # any leftover items in nv_dict are missing LVs, let's arrange the
    # data better
    for key, inst in nv_dict.iteritems():
      if inst.name not in res_missing:
        res_missing[inst.name] = []
      res_missing[inst.name].append(key)

    return result
900 2c95a8d4 Iustin Pop
901 07bd8a51 Iustin Pop
class LURenameCluster(LogicalUnit):
  """Rename the cluster.

  Changes the cluster name and the master IP in the sstore and
  redistributes the updated sstore files to all nodes.

  """
  HPATH = "cluster-rename"
  HTYPE = constants.HTYPE_CLUSTER
  _OP_REQP = ["name"]

  def BuildHooksEnv(self):
    """Build hooks env.

    The hooks (pre and post) run only on the master node.

    """
    env = {
      "OP_TARGET": self.sstore.GetClusterName(),
      "NEW_NAME": self.op.name,
      }
    mn = self.sstore.GetMasterNode()
    return env, [mn], [mn]

  def CheckPrereq(self):
    """Verify that the passed name is a valid one.

    Resolves the new name, checks that either the name or the IP
    actually changes, and that the new IP is not already live on the
    network. Stores the resolved IP in self.ip for Exec.

    """
    hostname = utils.HostInfo(self.op.name)

    new_name = hostname.name
    self.ip = new_ip = hostname.ip
    old_name = self.sstore.GetClusterName()
    old_ip = self.sstore.GetMasterIP()
    if new_name == old_name and new_ip == old_ip:
      raise errors.OpPrereqError("Neither the name nor the IP address of the"
                                 " cluster has changed")
    if new_ip != old_ip:
      # refuse an IP that already answers pings on the network
      result = utils.RunCmd(["fping", "-q", new_ip])
      if not result.failed:
        raise errors.OpPrereqError("The given cluster IP address (%s) is"
                                   " reachable on the network. Aborting." %
                                   new_ip)

    self.op.name = new_name

  def Exec(self, feedback_fn):
    """Rename the cluster.

    Stops the master role while the sstore is updated and distributed,
    then restarts it even if the distribution failed.

    """
    clustername = self.op.name
    ip = self.ip
    ss = self.sstore

    # shutdown the master IP
    master = ss.GetMasterNode()
    if not rpc.call_node_stop_master(master):
      raise errors.OpExecError("Could not disable the master role")

    try:
      # modify the sstore
      ss.SetKey(ss.SS_MASTER_IP, ip)
      ss.SetKey(ss.SS_CLUSTER_NAME, clustername)

      # Distribute updated ss config to all nodes
      myself = self.cfg.GetNodeInfo(master)
      dist_nodes = self.cfg.GetNodeList()
      if myself.name in dist_nodes:
        # the master already has the new files locally
        dist_nodes.remove(myself.name)

      logger.Debug("Copying updated ssconf data to all nodes")
      for keyname in [ss.SS_CLUSTER_NAME, ss.SS_MASTER_IP]:
        fname = ss.KeyToFilename(keyname)
        result = rpc.call_upload_file(dist_nodes, fname)
        for to_node in dist_nodes:
          if not result[to_node]:
            # distribution failures are logged but do not abort the
            # rename; the master copy is authoritative
            logger.Error("copy of file %s to node %s failed" %
                         (fname, to_node))
    finally:
      # always try to restart the master role, even on failure above
      if not rpc.call_node_start_master(master):
        logger.Error("Could not re-enable the master role on the master,"
                     " please restart manually.")
979 07bd8a51 Iustin Pop
980 5bfac263 Iustin Pop
def _WaitForSync(cfgw, instance, proc, oneshot=False, unlock=False):
  """Sleep and poll for an instance's disk to sync.

  Args:
    cfgw: configuration object, used to set the disks' physical IDs
    instance: the instance whose disks are polled
    proc: processor-like object providing LogInfo/LogWarning
    oneshot: if True, report the current status once instead of
      looping until the sync finishes
    unlock: if True, drop the 'cmd' lock while sleeping between polls

  Returns:
    True if no disk ended up degraded, False otherwise.

  Raises:
    errors.RemoteError: if the primary node cannot be contacted for
      mirror data ten times in a row.

  """
  if not instance.disks:
    # nothing to wait for
    return True

  if not oneshot:
    proc.LogInfo("Waiting for instance %s to sync disks." % instance.name)

  node = instance.primary_node

  for dev in instance.disks:
    cfgw.SetDiskID(dev, node)

  retries = 0
  while True:
    max_time = 0
    done = True
    cumul_degraded = False
    rstats = rpc.call_blockdev_getmirrorstatus(node, instance.disks)
    if not rstats:
      proc.LogWarning("Can't get any data from node %s" % node)
      retries += 1
      if retries >= 10:
        raise errors.RemoteError("Can't contact node %s for mirror data,"
                                 " aborting." % node)
      time.sleep(6)
      continue
    retries = 0
    for i in range(len(rstats)):
      mstat = rstats[i]
      if mstat is None:
        proc.LogWarning("Can't compute data for node %s/%s" %
                        (node, instance.disks[i].iv_name))
        continue
      # we ignore the ldisk parameter
      perc_done, est_time, is_degraded, _ = mstat
      cumul_degraded = cumul_degraded or (is_degraded and perc_done is None)
      if perc_done is not None:
        # a percentage means this device is still resyncing
        done = False
        if est_time is not None:
          rem_time = "%d estimated seconds remaining" % est_time
          max_time = est_time
        else:
          rem_time = "no time estimate"
        proc.LogInfo("- device %s: %5.2f%% done, %s" %
                     (instance.disks[i].iv_name, perc_done, rem_time))
    if done or oneshot:
      break

    if unlock:
      utils.Unlock('cmd')
    try:
      # if no device provided a time estimate, max_time is still 0 and
      # sleep(0) would busy-loop on the RPC layer; fall back to a small
      # fixed delay in that case
      time.sleep(min(60, max_time) or 5)
    finally:
      if unlock:
        utils.Lock('cmd')

  if done:
    proc.LogInfo("Instance %s's disks are in sync." % instance.name)
  return not cumul_degraded
1043 a8083063 Iustin Pop
1044 0834c866 Iustin Pop
def _CheckDiskConsistency(cfgw, dev, node, on_primary, ldisk=False):
  """Check that mirrors are not degraded.

  The ldisk parameter, if True, will change the test from the
  is_degraded attribute (which represents overall non-ok status for
  the device(s)) to the ldisk (representing the local storage status).

  Returns True if the device (and, recursively, its children) looks
  consistent, False otherwise.

  """
  cfgw.SetDiskID(dev, node)
  # select which field of the remote find() result to test: index 6 is
  # the ldisk status, index 5 the overall is_degraded flag
  if ldisk:
    idx = 6
  else:
    idx = 5

  result = True
  if on_primary or dev.AssembleOnSecondary():
    rstats = rpc.call_blockdev_find(node, dev)
    if not rstats:
      logger.ToStderr("Can't get any data from node %s" % node)
      result = False
    else:
      result = result and (not rstats[idx])
  if dev.children:
    for child in dev.children:
      # NOTE(review): 'ldisk' is not forwarded here, so children are
      # always checked via is_degraded — confirm this is intended
      result = result and _CheckDiskConsistency(cfgw, child, node, on_primary)

  return result
1072 a8083063 Iustin Pop
1073 a8083063 Iustin Pop
class LUDiagnoseOS(NoHooksLU):
  """Logical unit for OS diagnose/query.

  """
  _OP_REQP = []

  def CheckPrereq(self):
    """Check prerequisites.

    A pure query LU: nothing to verify, so this always succeeds.

    """
    return

  def Exec(self, feedback_fn):
    """Compute the list of OSes.

    Asks every node for its OS diagnosis data and returns the
    aggregated result.

    """
    nlist = self.cfg.GetNodeList()
    os_data = rpc.call_os_diagnose(nlist)
    if os_data == False:
      raise errors.OpExecError("Can't gather the list of OSes")
    return os_data
1097 a8083063 Iustin Pop
1098 a8083063 Iustin Pop
class LURemoveNode(LogicalUnit):
  """Logical unit for removing a node.

  """
  HPATH = "node-remove"
  HTYPE = constants.HTYPE_NODE
  _OP_REQP = ["node_name"]

  def BuildHooksEnv(self):
    """Build hooks env.

    This doesn't run on the target node in the pre phase as a failed
    node would not allow itself to run.

    """
    env = {
      "OP_TARGET": self.op.node_name,
      "NODE_NAME": self.op.node_name,
      }
    all_nodes = self.cfg.GetNodeList()
    all_nodes.remove(self.op.node_name)
    return env, all_nodes, all_nodes

  def CheckPrereq(self):
    """Check prerequisites.

    This checks:
     - the node exists in the configuration
     - it does not have primary or secondary instances
     - it's not the master

    Any errors are signalled by raising errors.OpPrereqError.

    """
    node = self.cfg.GetNodeInfo(self.cfg.ExpandNodeName(self.op.node_name))
    if node is None:
      # use the call form of raise, consistent with the rest of this
      # module (the old "raise Exc, args" comma form is deprecated)
      raise errors.OpPrereqError("Node '%s' is unknown." % self.op.node_name)

    instance_list = self.cfg.GetInstanceList()

    masternode = self.sstore.GetMasterNode()
    if node.name == masternode:
      raise errors.OpPrereqError("Node is the master node,"
                                 " you need to failover first.")

    for instance_name in instance_list:
      instance = self.cfg.GetInstanceInfo(instance_name)
      if node.name == instance.primary_node:
        raise errors.OpPrereqError("Instance %s still running on the node,"
                                   " please remove first." % instance_name)
      if node.name in instance.secondary_nodes:
        raise errors.OpPrereqError("Instance %s has node as a secondary,"
                                   " please remove first." % instance_name)
    self.op.node_name = node.name
    self.node = node

  def Exec(self, feedback_fn):
    """Removes the node from the cluster.

    Asks the node to leave the cluster, stops its node daemon, removes
    it from the configuration and from /etc/hosts.

    """
    node = self.node
    logger.Info("stopping the node daemon and removing configs from node %s" %
                node.name)

    rpc.call_node_leave_cluster(node.name)

    self.ssh.Run(node.name, 'root', "%s stop" % constants.NODE_INITD_SCRIPT)

    logger.Info("Removing node %s from config" % node.name)

    self.cfg.RemoveNode(node.name)

    _RemoveHostFromEtcHosts(node.name)
1172 a8083063 Iustin Pop
1173 a8083063 Iustin Pop
class LUQueryNodes(NoHooksLU):
  """Logical unit for querying nodes.

  Returns, for each requested node, the values of the requested output
  fields; dynamic fields are fetched live from the nodes via RPC.

  """
  _OP_REQP = ["output_fields", "names"]

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the fields required are valid output fields.

    """
    dynamic = ["dtotal", "dfree", "mtotal", "mnode", "mfree", "bootid"]
    self.dynamic_fields = frozenset(dynamic)

    static = ["name", "pinst_cnt", "sinst_cnt",
              "pinst_list", "sinst_list",
              "pip", "sip"]
    _CheckOutputFields(static=static,
                       dynamic=self.dynamic_fields,
                       selected=self.op.output_fields)

    self.wanted = _GetWantedNodes(self, self.op.names)

  def Exec(self, feedback_fn):
    """Computes the list of nodes and their attributes.

    """
    names = self.wanted
    nodelist = [self.cfg.GetNodeInfo(name) for name in names]

    # live node data is only fetched when a dynamic field was requested
    if self.dynamic_fields.intersection(self.op.output_fields):
      live_data = {}
      node_data = rpc.call_node_info(names, self.cfg.GetVGName())
      for name in names:
        nodeinfo = node_data.get(name, None)
        if not nodeinfo:
          live_data[name] = {}
          continue
        live_data[name] = {
          "mtotal": utils.TryConvert(int, nodeinfo['memory_total']),
          "mnode": utils.TryConvert(int, nodeinfo['memory_dom0']),
          "mfree": utils.TryConvert(int, nodeinfo['memory_free']),
          "dtotal": utils.TryConvert(int, nodeinfo['vg_size']),
          "dfree": utils.TryConvert(int, nodeinfo['vg_free']),
          "bootid": nodeinfo['bootid'],
          }
    else:
      live_data = dict.fromkeys(names, {})

    node_to_primary = dict([(name, set()) for name in names])
    node_to_secondary = dict([(name, set()) for name in names])

    # the instance list is only walked if an instance-related field
    # was requested
    inst_fields = frozenset(("pinst_cnt", "pinst_list",
                             "sinst_cnt", "sinst_list"))
    if inst_fields & frozenset(self.op.output_fields):
      for instance_name in self.cfg.GetInstanceList():
        inst = self.cfg.GetInstanceInfo(instance_name)
        if inst.primary_node in node_to_primary:
          node_to_primary[inst.primary_node].add(inst.name)
        for secnode in inst.secondary_nodes:
          if secnode in node_to_secondary:
            node_to_secondary[secnode].add(inst.name)

    output = []
    for node in nodelist:
      row = []
      for field in self.op.output_fields:
        if field == "name":
          val = node.name
        elif field == "pinst_list":
          val = list(node_to_primary[node.name])
        elif field == "sinst_list":
          val = list(node_to_secondary[node.name])
        elif field == "pinst_cnt":
          val = len(node_to_primary[node.name])
        elif field == "sinst_cnt":
          val = len(node_to_secondary[node.name])
        elif field == "pip":
          val = node.primary_ip
        elif field == "sip":
          val = node.secondary_ip
        elif field in self.dynamic_fields:
          val = live_data[node.name].get(field, None)
        else:
          raise errors.ParameterError(field)
        row.append(val)
      output.append(row)

    return output
1269 a8083063 Iustin Pop
1270 a8083063 Iustin Pop
1271 dcb93971 Michael Hanselmann
class LUQueryNodeVolumes(NoHooksLU):
  """Logical unit for getting volumes on node(s).

  """
  _OP_REQP = ["nodes", "output_fields"]

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the fields required are valid output fields.

    """
    self.nodes = _GetWantedNodes(self, self.op.nodes)

    _CheckOutputFields(static=["node"],
                       dynamic=["phys", "vg", "name", "size", "instance"],
                       selected=self.op.output_fields)

  def Exec(self, feedback_fn):
    """Computes the list of nodes and their attributes.

    """
    nodenames = self.nodes
    volumes = rpc.call_node_volumes(nodenames)

    ilist = [self.cfg.GetInstanceInfo(iname)
             for iname in self.cfg.GetInstanceList()]

    # map each instance to its per-node LV dict, for the 'instance' field
    lv_by_node = dict([(inst, inst.MapLVsByNode()) for inst in ilist])

    output = []
    for node in nodenames:
      # skip nodes that did not answer or reported no volumes
      if not volumes.get(node):
        continue

      node_vols = sorted(volumes[node], key=lambda vol: vol['dev'])

      for vol in node_vols:
        row = []
        for field in self.op.output_fields:
          if field == "node":
            val = node
          elif field == "phys":
            val = vol['dev']
          elif field == "vg":
            val = vol['vg']
          elif field == "name":
            val = vol['name']
          elif field == "size":
            val = int(float(vol['size']))
          elif field == "instance":
            # find the instance owning this LV on this node, if any
            val = '-'
            for inst in ilist:
              node_lvs = lv_by_node[inst].get(node)
              if node_lvs and vol['name'] in node_lvs:
                val = inst.name
                break
          else:
            raise errors.ParameterError(field)
          row.append(str(val))

        output.append(row)

    return output
1339 dcb93971 Michael Hanselmann
1340 dcb93971 Michael Hanselmann
1341 a8083063 Iustin Pop
class LUAddNode(LogicalUnit):
  """Logical unit for adding node to the cluster.

  """
  HPATH = "node-add"
  HTYPE = constants.HTYPE_NODE
  _OP_REQP = ["node_name"]

  def BuildHooksEnv(self):
    """Build hooks env.

    This will run on all nodes before, and on all nodes + the new node after.

    """
    env = {
      "OP_TARGET": self.op.node_name,
      "NODE_NAME": self.op.node_name,
      "NODE_PIP": self.op.primary_ip,
      "NODE_SIP": self.op.secondary_ip,
      }
    nodes_0 = self.cfg.GetNodeList()
    nodes_1 = nodes_0 + [self.op.node_name, ]
    return env, nodes_0, nodes_1

  def CheckPrereq(self):
    """Check prerequisites.

    This checks:
     - the new node is not already in the config
     - it is resolvable
     - its parameters (single/dual homed) matches the cluster
     - its IP addresses do not collide with any existing node's

    Any errors are signalled by raising errors.OpPrereqError.

    """
    node_name = self.op.node_name
    cfg = self.cfg

    dns_data = utils.HostInfo(node_name)

    node = dns_data.name
    primary_ip = self.op.primary_ip = dns_data.ip
    # the secondary IP defaults to the primary one (single-homed node)
    secondary_ip = getattr(self.op, "secondary_ip", None)
    if secondary_ip is None:
      secondary_ip = primary_ip
    if not utils.IsValidIP(secondary_ip):
      raise errors.OpPrereqError("Invalid secondary IP given")
    self.op.secondary_ip = secondary_ip
    node_list = cfg.GetNodeList()
    if node in node_list:
      raise errors.OpPrereqError("Node %s is already in the configuration"
                                 % node)

    # refuse IPs already used (as primary or secondary) by any known node
    for existing_node_name in node_list:
      existing_node = cfg.GetNodeInfo(existing_node_name)
      if (existing_node.primary_ip == primary_ip or
          existing_node.secondary_ip == primary_ip or
          existing_node.primary_ip == secondary_ip or
          existing_node.secondary_ip == secondary_ip):
        raise errors.OpPrereqError("New node ip address(es) conflict with"
                                   " existing node %s" % existing_node.name)

    # check that the type of the node (single versus dual homed) is the
    # same as for the master
    myself = cfg.GetNodeInfo(self.sstore.GetMasterNode())
    master_singlehomed = myself.secondary_ip == myself.primary_ip
    newbie_singlehomed = secondary_ip == primary_ip
    if master_singlehomed != newbie_singlehomed:
      if master_singlehomed:
        raise errors.OpPrereqError("The master has no private ip but the"
                                   " new node has one")
      else:
        raise errors.OpPrereqError("The master has a private ip but the"
                                   " new node doesn't have one")

    # checks reachablity
    if not utils.TcpPing(primary_ip, constants.DEFAULT_NODED_PORT):
      raise errors.OpPrereqError("Node not reachable by ping")

    if not newbie_singlehomed:
      # check reachability from my secondary ip to newbie's secondary ip
      if not utils.TcpPing(secondary_ip, constants.DEFAULT_NODED_PORT,
                           source=myself.secondary_ip):
        raise errors.OpPrereqError("Node secondary ip not reachable by TCP"
                                   " based ping to noded port")

    self.new_node = objects.Node(name=node,
                                 primary_ip=primary_ip,
                                 secondary_ip=secondary_ip)

    # HVM clusters need the shared VNC password file to exist so it can
    # be copied to the new node later in Exec
    if self.sstore.GetHypervisorType() == constants.HT_XEN_HVM31:
      if not os.path.exists(constants.VNC_PASSWORD_FILE):
        raise errors.OpPrereqError("Cluster VNC password file %s missing" %
                                   constants.VNC_PASSWORD_FILE)

  def Exec(self, feedback_fn):
    """Adds the new node to the cluster.

    The sequence is: push the node daemon password and SSL certificate
    over ssh and restart the daemon, verify the protocol version over
    RPC, install the cluster ssh keys, update /etc/hosts and
    known_hosts everywhere, copy the simple-store files, and finally
    register the node in the configuration.

    """
    new_node = self.new_node
    node = new_node.name

    # set up inter-node password and certificate and restarts the node daemon
    gntpass = self.sstore.GetNodeDaemonPassword()
    # the password is interpolated into a shell command below, so only a
    # strictly limited character set is accepted
    if not re.match('^[a-zA-Z0-9.]{1,64}$', gntpass):
      raise errors.OpExecError("ganeti password corruption detected")
    f = open(constants.SSL_CERT_FILE)
    try:
      gntpem = f.read(8192)
    finally:
      f.close()
    # in the base64 pem encoding, neither '!' nor '.' are valid chars,
    # so we use this to detect an invalid certificate; as long as the
    # cert doesn't contain this, the here-document will be correctly
    # parsed by the shell sequence below
    if re.search('^!EOF\.', gntpem, re.MULTILINE):
      raise errors.OpExecError("invalid PEM encoding in the SSL certificate")
    if not gntpem.endswith("\n"):
      raise errors.OpExecError("PEM must end with newline")
    logger.Info("copy cluster pass to %s and starting the node daemon" % node)

    # and then connect with ssh to set password and start ganeti-noded
    # note that all the below variables are sanitized at this point,
    # either by being constants or by the checks above
    ss = self.sstore
    mycommand = ("umask 077 && "
                 "echo '%s' > '%s' && "
                 "cat > '%s' << '!EOF.' && \n"
                 "%s!EOF.\n%s restart" %
                 (gntpass, ss.KeyToFilename(ss.SS_NODED_PASS),
                  constants.SSL_CERT_FILE, gntpem,
                  constants.NODE_INITD_SCRIPT))

    result = self.ssh.Run(node, 'root', mycommand, batch=False, ask_key=True)
    if result.failed:
      raise errors.OpExecError("Remote command on node %s, error: %s,"
                               " output: %s" %
                               (node, result.fail_reason, result.output))

    # check connectivity; the fixed sleep gives the restarted daemon
    # time to come up before the version RPC below
    time.sleep(4)

    result = rpc.call_version([node])[node]
    if result:
      if constants.PROTOCOL_VERSION == result:
        logger.Info("communication to node %s fine, sw version %s match" %
                    (node, result))
      else:
        raise errors.OpExecError("Version mismatch master version %s,"
                                 " node version %s" %
                                 (constants.PROTOCOL_VERSION, result))
    else:
      raise errors.OpExecError("Cannot get version from the new node")

    # setup ssh on node
    logger.Info("copy ssh key to node %s" % node)
    priv_key, pub_key, _ = ssh.GetUserFiles(constants.GANETI_RUNAS)
    keyarray = []
    # host DSA/RSA key pairs plus the cluster user's key pair
    keyfiles = [constants.SSH_HOST_DSA_PRIV, constants.SSH_HOST_DSA_PUB,
                constants.SSH_HOST_RSA_PRIV, constants.SSH_HOST_RSA_PUB,
                priv_key, pub_key]

    for i in keyfiles:
      f = open(i, 'r')
      try:
        keyarray.append(f.read())
      finally:
        f.close()

    result = rpc.call_node_add(node, keyarray[0], keyarray[1], keyarray[2],
                               keyarray[3], keyarray[4], keyarray[5])

    if not result:
      raise errors.OpExecError("Cannot transfer ssh keys to the new node")

    # Add node to our /etc/hosts, and add key to known_hosts
    _AddHostToEtcHosts(new_node.name)

    # for dual-homed nodes, verify the node really owns the secondary IP
    # by asking it to ping itself on that address
    if new_node.secondary_ip != new_node.primary_ip:
      if not rpc.call_node_tcp_ping(new_node.name,
                                    constants.LOCALHOST_IP_ADDRESS,
                                    new_node.secondary_ip,
                                    constants.DEFAULT_NODED_PORT,
                                    10, False):
        raise errors.OpExecError("Node claims it doesn't have the secondary ip"
                                 " you gave (%s). Please fix and re-run this"
                                 " command." % new_node.secondary_ip)

    success, msg = self.ssh.VerifyNodeHostname(node)
    if not success:
      raise errors.OpExecError("Node '%s' claims it has a different hostname"
                               " than the one the resolver gives: %s."
                               " Please fix and re-run this command." %
                               (node, msg))

    # Distribute updated /etc/hosts and known_hosts to all nodes,
    # including the node just added
    myself = self.cfg.GetNodeInfo(self.sstore.GetMasterNode())
    dist_nodes = self.cfg.GetNodeList() + [node]
    if myself.name in dist_nodes:
      dist_nodes.remove(myself.name)

    logger.Debug("Copying hosts and known_hosts to all nodes")
    for fname in (constants.ETC_HOSTS, constants.SSH_KNOWN_HOSTS_FILE):
      result = rpc.call_upload_file(dist_nodes, fname)
      for to_node in dist_nodes:
        if not result[to_node]:
          # best-effort distribution: log and continue
          logger.Error("copy of file %s to node %s failed" %
                       (fname, to_node))

    to_copy = ss.GetFileList()
    if self.sstore.GetHypervisorType() == constants.HT_XEN_HVM31:
      to_copy.append(constants.VNC_PASSWORD_FILE)
    for fname in to_copy:
      if not self.ssh.CopyFileToNode(node, fname):
        logger.Error("could not copy file %s to node %s" % (fname, node))

    logger.Info("adding node %s to cluster.conf" % node)
    self.cfg.AddNode(new_node)
1560 a8083063 Iustin Pop
1561 a8083063 Iustin Pop
1562 a8083063 Iustin Pop
class LUMasterFailover(LogicalUnit):
  """Failover the master node to the current node.

  This is a special LU in that it must run on a non-master node.

  """
  HPATH = "master-failover"
  HTYPE = constants.HTYPE_CLUSTER
  REQ_MASTER = False
  _OP_REQP = []

  def BuildHooksEnv(self):
    """Build hooks env.

    This will run on the new master only in the pre phase, and on all
    the nodes in the post phase.

    """
    env = {
      "OP_TARGET": self.new_master,
      "NEW_MASTER": self.new_master,
      "OLD_MASTER": self.old_master,
      }
    return env, [self.new_master], self.cfg.GetNodeList()

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that we are not already the master.

    """
    self.new_master = utils.HostInfo().name
    self.old_master = self.sstore.GetMasterNode()

    if self.old_master == self.new_master:
      # message fixed: was "This commands must be run"
      raise errors.OpPrereqError("This command must be run on the node"
                                 " where you want the new master to be."
                                 " %s is already the master" %
                                 self.old_master)

  def Exec(self, feedback_fn):
    """Failover the master node.

    This command, when run on a non-master node, will cause the current
    master to cease being master, and the non-master to become new
    master.

    All three steps (demote old master, distribute the new master file,
    promote the new master) are best-effort: failures are logged but do
    not abort the operation.

    """
    #TODO: do not rely on gethostname returning the FQDN
    logger.Info("setting master to %s, old master: %s" %
                (self.new_master, self.old_master))

    if not rpc.call_node_stop_master(self.old_master):
      # message fixed: was "could disable", inverting the meaning of
      # this failure branch
      logger.Error("could not disable the master role on the old master"
                   " %s, please disable manually" % self.old_master)

    # record the new master in the simple store and push that file to
    # every node so the whole cluster agrees on who the master is
    ss = self.sstore
    ss.SetKey(ss.SS_MASTER_NODE, self.new_master)
    if not rpc.call_upload_file(self.cfg.GetNodeList(),
                                ss.KeyToFilename(ss.SS_MASTER_NODE)):
      logger.Error("could not distribute the new simple store master file"
                   " to the other nodes, please check.")

    if not rpc.call_node_start_master(self.new_master):
      logger.Error("could not start the master role on the new master"
                   " %s, please check" % self.new_master)
      feedback_fn("Error in activating the master IP on the new master,"
                  " please fix manually.")
1630 a8083063 Iustin Pop
1631 a8083063 Iustin Pop
1632 a8083063 Iustin Pop
1633 a8083063 Iustin Pop
class LUQueryClusterInfo(NoHooksLU):
  """Query cluster configuration.

  Returns a dict with the cluster name, master node, software/protocol
  versions and the platform architecture.

  """
  _OP_REQP = []
  REQ_MASTER = False

  def CheckPrereq(self):
    """No prerequisites needed for this LU.

    """
    pass

  def Exec(self, feedback_fn):
    """Return cluster config.

    """
    result = {}
    result["name"] = self.sstore.GetClusterName()
    result["software_version"] = constants.RELEASE_VERSION
    result["protocol_version"] = constants.PROTOCOL_VERSION
    result["config_version"] = constants.CONFIG_VERSION
    result["os_api_version"] = constants.OS_API_VERSION
    result["export_version"] = constants.EXPORT_VERSION
    result["master"] = self.sstore.GetMasterNode()
    result["architecture"] = (platform.architecture()[0], platform.machine())
    return result
1662 a8083063 Iustin Pop
1663 a8083063 Iustin Pop
1664 a8083063 Iustin Pop
class LUClusterCopyFile(NoHooksLU):
  """Copy file to cluster.

  """
  _OP_REQP = ["nodes", "filename"]

  def CheckPrereq(self):
    """Check prerequisites.

    It should check that the named file exists and that the given list
    of nodes is valid.

    """
    if not os.path.exists(self.op.filename):
      raise errors.OpPrereqError("No such filename '%s'" % self.op.filename)

    self.nodes = _GetWantedNodes(self, self.op.nodes)

  def Exec(self, feedback_fn):
    """Copy a file from master to some nodes.

    Copies self.op.filename to every selected node except the local
    one; per-node failures are logged, not raised.

    """
    filename = self.op.filename
    myname = utils.HostInfo().name

    # never copy onto ourselves
    targets = [node for node in self.nodes if node != myname]

    for node in targets:
      if not self.ssh.CopyFileToNode(node, filename):
        logger.Error("Copy of file %s to node %s failed" % (filename, node))
1701 a8083063 Iustin Pop
1702 a8083063 Iustin Pop
1703 a8083063 Iustin Pop
class LUDumpClusterConfig(NoHooksLU):
  """Return a text-representation of the cluster-config.

  """
  _OP_REQP = []

  def CheckPrereq(self):
    """No prerequisites for this LU.

    """
    pass

  def Exec(self, feedback_fn):
    """Return the textual dump of the cluster configuration.

    """
    return self.cfg.DumpConfig()
1720 a8083063 Iustin Pop
1721 a8083063 Iustin Pop
1722 a8083063 Iustin Pop
class LURunClusterCommand(NoHooksLU):
  """Run a command on some nodes.

  """
  _OP_REQP = ["command", "nodes"]

  def CheckPrereq(self):
    """Check prerequisites.

    It checks that the given list of nodes is valid.

    """
    self.nodes = _GetWantedNodes(self, self.op.nodes)

  def Exec(self, feedback_fn):
    """Run the command on every selected node and gather the results.

    Returns a list of (node, command_output, exit_code) tuples, one per
    node, in the order of self.nodes.

    """
    results = []
    for node_name in self.nodes:
      ssh_result = self.ssh.Run(node_name, "root", self.op.command)
      results.append((node_name, ssh_result.output, ssh_result.exit_code))

    return results
1746 a8083063 Iustin Pop
1747 a8083063 Iustin Pop
1748 a8083063 Iustin Pop
class LUActivateInstanceDisks(NoHooksLU):
  """Bring up an instance's disks.

  """
  _OP_REQP = ["instance_name"]

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster.

    """
    full_name = self.cfg.ExpandInstanceName(self.op.instance_name)
    instance = self.cfg.GetInstanceInfo(full_name)
    if instance is None:
      raise errors.OpPrereqError("Instance '%s' not known" %
                                 self.op.instance_name)
    self.instance = instance

  def Exec(self, feedback_fn):
    """Activate the disks.

    Returns the device mapping information from the assembly, to be
    shown to the user.

    """
    disks_ok, disks_info = _AssembleInstanceDisks(self.instance, self.cfg)
    if not disks_ok:
      raise errors.OpExecError("Cannot activate block devices")

    return disks_info
1777 a8083063 Iustin Pop
1778 a8083063 Iustin Pop
1779 a8083063 Iustin Pop
def _AssembleInstanceDisks(instance, cfg, ignore_secondaries=False):
  """Prepare the block devices for an instance.

  This sets up the block devices on all nodes of the instance, in two
  passes: first all devices are assembled in secondary (non-primary)
  mode everywhere, then the devices on the primary node are assembled
  again in primary mode.

  Args:
    instance: a ganeti.objects.Instance object
    cfg: the cluster configuration, used to set the physical disk IDs
         before each per-node RPC call
    ignore_secondaries: if true, errors on secondary nodes won't result
                        in an error return from the function

  Returns:
    a tuple (disks_ok, device_info), where:
      disks_ok: boolean, False if any non-ignored assembly failed
      device_info: list of (host, instance_visible_name, result) entries
           with the mapping from node devices to instance devices

  """
  device_info = []
  disks_ok = True
  iname = instance.name
  # With the two passes mechanism we try to reduce the window of
  # opportunity for the race condition of switching DRBD to primary
  # before handshaking occured, but we do not eliminate it

  # The proper fix would be to wait (with some limits) until the
  # connection has been made and drbd transitions from WFConnection
  # into any other network-connected state (Connected, SyncTarget,
  # SyncSource, etc.)

  # 1st pass, assemble on all nodes in secondary mode
  for inst_disk in instance.disks:
    for node, node_disk in inst_disk.ComputeNodeTree(instance.primary_node):
      cfg.SetDiskID(node_disk, node)
      result = rpc.call_blockdev_assemble(node, node_disk, iname, False)
      if not result:
        logger.Error("could not prepare block device %s on node %s"
                     " (is_primary=False, pass=1)" % (inst_disk.iv_name, node))
        if not ignore_secondaries:
          disks_ok = False

  # FIXME: race condition on drbd migration to primary

  # 2nd pass, do only the primary node
  for inst_disk in instance.disks:
    for node, node_disk in inst_disk.ComputeNodeTree(instance.primary_node):
      if node != instance.primary_node:
        continue
      cfg.SetDiskID(node_disk, node)
      result = rpc.call_blockdev_assemble(node, node_disk, iname, True)
      if not result:
        logger.Error("could not prepare block device %s on node %s"
                     " (is_primary=True, pass=2)" % (inst_disk.iv_name, node))
        disks_ok = False
    # NOTE(review): 'result' is whatever the last inner-loop assignment
    # left behind; if ComputeNodeTree never yields the primary node for
    # this disk, a stale value from a previous disk would be appended --
    # confirm the primary node is always present in the tree.
    device_info.append((instance.primary_node, inst_disk.iv_name, result))

  # leave the disks configured for the primary node
  # this is a workaround that would be fixed better by
  # improving the logical/physical id handling
  for disk in instance.disks:
    cfg.SetDiskID(disk, instance.primary_node)

  return disks_ok, device_info
1839 a8083063 Iustin Pop
1840 a8083063 Iustin Pop
1841 fe7b0351 Michael Hanselmann
def _StartInstanceDisks(cfg, instance, force):
  """Assemble an instance's disks, cleaning up on failure.

  On assembly failure the devices are shut down again and an
  OpExecError is raised; if force is set but false, a hint about
  retrying with '--force' is logged first.

  """
  assembled, _ = _AssembleInstanceDisks(instance, cfg,
                                        ignore_secondaries=force)
  if assembled:
    return
  _ShutdownInstanceDisks(instance, cfg)
  if force is not None and not force:
    logger.Error("If the message above refers to a secondary node,"
                 " you can retry the operation using '--force'.")
  raise errors.OpExecError("Disk consistency error")
1853 fe7b0351 Michael Hanselmann
1854 fe7b0351 Michael Hanselmann
1855 a8083063 Iustin Pop
class LUDeactivateInstanceDisks(NoHooksLU):
  """Shutdown an instance's disks.

  """
  _OP_REQP = ["instance_name"]

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster.

    """
    full_name = self.cfg.ExpandInstanceName(self.op.instance_name)
    instance = self.cfg.GetInstanceInfo(full_name)
    if instance is None:
      raise errors.OpPrereqError("Instance '%s' not known" %
                                 self.op.instance_name)
    self.instance = instance

  def Exec(self, feedback_fn):
    """Deactivate the disks

    Refuses to act if the instance is still reported as running on its
    primary node.

    """
    instance = self.instance
    pnode = instance.primary_node
    running = rpc.call_instance_list([pnode])[pnode]
    # a non-list answer means the RPC itself failed
    if type(running) is not list:
      raise errors.OpExecError("Can't contact node '%s'" % pnode)

    if instance.name in running:
      raise errors.OpExecError("Instance is running, can't shutdown"
                               " block devices.")

    _ShutdownInstanceDisks(instance, self.cfg)
1890 a8083063 Iustin Pop
1891 a8083063 Iustin Pop
1892 a8083063 Iustin Pop
def _ShutdownInstanceDisks(instance, cfg, ignore_primary=False):
  """Shutdown block devices of an instance.

  This does the shutdown on all nodes of the instance.

  Errors on the primary node are ignored (for the purpose of the
  return value) only if ignore_primary is true; errors on any other
  node always make the function return False.  All failures are
  logged regardless.

  Args:
    instance: a ganeti.objects.Instance object
    cfg: the cluster configuration, used to set the physical disk IDs
    ignore_primary: if true, failures on the primary node do not
                    affect the return value

  Returns:
    True if all (non-ignored) shutdowns succeeded, False otherwise

  """
  # note: the original docstring here claimed the opposite behavior
  # ("if ignore_primary is false, errors on the primary are ignored");
  # the code below is unchanged, only the documentation was corrected
  result = True
  for disk in instance.disks:
    for node, top_disk in disk.ComputeNodeTree(instance.primary_node):
      cfg.SetDiskID(top_disk, node)
      if not rpc.call_blockdev_shutdown(node, top_disk):
        logger.Error("could not shutdown block device %s on node %s" %
                     (disk.iv_name, node))
        if not ignore_primary or node != instance.primary_node:
          result = False
  return result
1911 a8083063 Iustin Pop
1912 a8083063 Iustin Pop
1913 d4f16fd9 Iustin Pop
def _CheckNodeFreeMemory(cfg, node, reason, requested):
  """Checks if a node has enough free memory.

  This function check if a given node has the needed amount of free
  memory. In case the node has less memory or we cannot get the
  information from the node, this function raise an OpPrereqError
  exception.

  Args:
    - cfg: a ConfigWriter instance
    - node: the node name
    - reason: string to use in the error message
    - requested: the amount of memory in MiB

  """
  nodeinfo = rpc.call_node_info([node], cfg.GetVGName())
  if not nodeinfo or not isinstance(nodeinfo, dict):
    raise errors.OpPrereqError("Could not contact node %s for resource"
                             " information" % (node,))

  # FIX: the original did nodeinfo[node].get(...) directly, which would
  # raise KeyError (node missing from the reply) or AttributeError (the
  # per-node entry is not a dict on RPC failure) instead of the
  # documented OpPrereqError
  node_data = nodeinfo.get(node)
  if not isinstance(node_data, dict):
    raise errors.OpPrereqError("Could not contact node %s for resource"
                             " information" % (node,))

  free_mem = node_data.get('memory_free')
  if not isinstance(free_mem, int):
    raise errors.OpPrereqError("Can't compute free memory on node %s, result"
                             " was '%s'" % (node, free_mem))
  if requested > free_mem:
    raise errors.OpPrereqError("Not enough memory on node %s for %s:"
                             " needed %s MiB, available %s MiB" %
                             (node, reason, requested, free_mem))
1941 d4f16fd9 Iustin Pop
1942 d4f16fd9 Iustin Pop
1943 a8083063 Iustin Pop
class LUStartupInstance(LogicalUnit):
  """Starts an instance.

  """
  HPATH = "instance-start"
  HTYPE = constants.HTYPE_INSTANCE
  _OP_REQP = ["instance_name", "force"]

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and secondary nodes of the instance.

    """
    env = {"FORCE": self.op.force}
    env.update(_BuildInstanceHookEnvByObject(self.instance))
    node_list = [self.sstore.GetMasterNode(), self.instance.primary_node]
    node_list.extend(self.instance.secondary_nodes)
    return env, node_list, node_list

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster.

    """
    full_name = self.cfg.ExpandInstanceName(self.op.instance_name)
    instance = self.cfg.GetInstanceInfo(full_name)
    if instance is None:
      raise errors.OpPrereqError("Instance '%s' not known" %
                                 self.op.instance_name)

    # the instance's bridges must exist on the primary node
    _CheckInstanceBridgesExist(instance)

    # and the node must have enough free memory for it
    _CheckNodeFreeMemory(self.cfg, instance.primary_node,
                         "starting instance %s" % instance.name,
                         instance.memory)

    self.instance = instance
    self.op.instance_name = instance.name

  def Exec(self, feedback_fn):
    """Start the instance.

    """
    instance = self.instance
    extra_args = getattr(self.op, "extra_args", "")
    primary = instance.primary_node

    _StartInstanceDisks(self.cfg, instance, self.op.force)

    if not rpc.call_instance_start(primary, instance, extra_args):
      # the instance did not come up; do not leave its disks assembled
      _ShutdownInstanceDisks(instance, self.cfg)
      raise errors.OpExecError("Could not start instance")

    self.cfg.MarkInstanceUp(instance.name)
2004 a8083063 Iustin Pop
2005 a8083063 Iustin Pop
2006 bf6929a2 Alexander Schreiber
class LURebootInstance(LogicalUnit):
  """Reboot an instance.

  Supports three reboot types: soft and hard reboots are delegated to
  the hypervisor on the primary node; a full reboot shuts the instance
  down, recycles its disks and starts it again.

  """
  HPATH = "instance-reboot"
  HTYPE = constants.HTYPE_INSTANCE
  _OP_REQP = ["instance_name", "ignore_secondaries", "reboot_type"]

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and secondary nodes of the instance.

    """
    env = {
      "IGNORE_SECONDARIES": self.op.ignore_secondaries,
      }
    env.update(_BuildInstanceHookEnvByObject(self.instance))
    nl = ([self.sstore.GetMasterNode(), self.instance.primary_node] +
          list(self.instance.secondary_nodes))
    return env, nl, nl

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster.

    """
    instance = self.cfg.GetInstanceInfo(
      self.cfg.ExpandInstanceName(self.op.instance_name))
    if instance is None:
      raise errors.OpPrereqError("Instance '%s' not known" %
                                 self.op.instance_name)

    # check bridges existance
    _CheckInstanceBridgesExist(instance)

    self.instance = instance
    self.op.instance_name = instance.name

  def Exec(self, feedback_fn):
    """Reboot the instance.

    """
    instance = self.instance
    ignore_secondaries = self.op.ignore_secondaries
    reboot_type = self.op.reboot_type
    extra_args = getattr(self.op, "extra_args", "")

    node_current = instance.primary_node

    # NOTE(review): this validation only runs at Exec time, after the
    # pre-hooks have already fired, and raises ParameterError rather
    # than OpPrereqError; consider moving it into CheckPrereq so bad
    # input is rejected before any side effects.
    if reboot_type not in [constants.INSTANCE_REBOOT_SOFT,
                           constants.INSTANCE_REBOOT_HARD,
                           constants.INSTANCE_REBOOT_FULL]:
      raise errors.ParameterError("reboot type not in [%s, %s, %s]" %
                                  (constants.INSTANCE_REBOOT_SOFT,
                                   constants.INSTANCE_REBOOT_HARD,
                                   constants.INSTANCE_REBOOT_FULL))

    if reboot_type in [constants.INSTANCE_REBOOT_SOFT,
                       constants.INSTANCE_REBOOT_HARD]:
      # soft/hard reboots are handled entirely by the hypervisor
      if not rpc.call_instance_reboot(node_current, instance,
                                      reboot_type, extra_args):
        raise errors.OpExecError("Could not reboot instance")
    else:
      # full reboot: stop the instance, recycle the disks, start again
      if not rpc.call_instance_shutdown(node_current, instance):
        raise errors.OpExecError("could not shutdown instance for full reboot")
      _ShutdownInstanceDisks(instance, self.cfg)
      _StartInstanceDisks(self.cfg, instance, ignore_secondaries)
      if not rpc.call_instance_start(node_current, instance, extra_args):
        # do not leave the disks assembled if the start failed
        _ShutdownInstanceDisks(instance, self.cfg)
        raise errors.OpExecError("Could not start instance for full reboot")

    self.cfg.MarkInstanceUp(instance.name)
2080 bf6929a2 Alexander Schreiber
2081 bf6929a2 Alexander Schreiber
2082 a8083063 Iustin Pop
class LUShutdownInstance(LogicalUnit):
  """Shutdown an instance.

  """
  HPATH = "instance-stop"
  HTYPE = constants.HTYPE_INSTANCE
  _OP_REQP = ["instance_name"]

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and secondary nodes of the instance.

    """
    env = _BuildInstanceHookEnvByObject(self.instance)
    node_list = [self.sstore.GetMasterNode(), self.instance.primary_node]
    node_list.extend(self.instance.secondary_nodes)
    return env, node_list, node_list

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster.

    """
    full_name = self.cfg.ExpandInstanceName(self.op.instance_name)
    instance = self.cfg.GetInstanceInfo(full_name)
    if instance is None:
      raise errors.OpPrereqError("Instance '%s' not known" %
                                 self.op.instance_name)
    self.instance = instance

  def Exec(self, feedback_fn):
    """Shutdown the instance.

    A failed hypervisor-level shutdown is only logged; the instance is
    marked down and its disks are deactivated regardless.

    """
    instance = self.instance
    if not rpc.call_instance_shutdown(instance.primary_node, instance):
      logger.Error("could not shutdown instance")

    self.cfg.MarkInstanceDown(instance.name)
    _ShutdownInstanceDisks(instance, self.cfg)
2125 a8083063 Iustin Pop
2126 a8083063 Iustin Pop
2127 fe7b0351 Michael Hanselmann
class LUReinstallInstance(LogicalUnit):
  """Reinstall an instance.

  Re-runs the OS create scripts on a stopped instance, optionally
  switching it to a different OS first.

  """
  HPATH = "instance-reinstall"
  HTYPE = constants.HTYPE_INSTANCE
  _OP_REQP = ["instance_name"]

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and secondary nodes of the instance.

    """
    env = _BuildInstanceHookEnvByObject(self.instance)
    nl = ([self.sstore.GetMasterNode(), self.instance.primary_node] +
          list(self.instance.secondary_nodes))
    return env, nl, nl

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster and is not running.

    """
    instance = self.cfg.GetInstanceInfo(
      self.cfg.ExpandInstanceName(self.op.instance_name))
    if instance is None:
      raise errors.OpPrereqError("Instance '%s' not known" %
                                 self.op.instance_name)
    if instance.disk_template == constants.DT_DISKLESS:
      raise errors.OpPrereqError("Instance '%s' has no disks" %
                                 self.op.instance_name)
    if instance.status != "down":
      raise errors.OpPrereqError("Instance '%s' is marked to be up" %
                                 self.op.instance_name)
    # the config can be stale, so also ask the primary node itself
    remote_info = rpc.call_instance_info(instance.primary_node, instance.name)
    if remote_info:
      raise errors.OpPrereqError("Instance '%s' is running on the node %s" %
                                 (self.op.instance_name,
                                  instance.primary_node))

    self.op.os_type = getattr(self.op, "os_type", None)
    if self.op.os_type is not None:
      # OS verification
      pnode = self.cfg.GetNodeInfo(
        self.cfg.ExpandNodeName(instance.primary_node))
      if pnode is None:
        # FIX: the original formatted self.op.pnode here, an attribute
        # this opcode never defines, turning this error path into an
        # AttributeError instead of the intended OpPrereqError
        raise errors.OpPrereqError("Primary node '%s' is unknown" %
                                   instance.primary_node)
      os_obj = rpc.call_os_get(pnode.name, self.op.os_type)
      if not os_obj:
        raise errors.OpPrereqError("OS '%s' not in supported OS list for"
                                   " primary node"  % self.op.os_type)

    self.instance = instance

  def Exec(self, feedback_fn):
    """Reinstall the instance.

    """
    inst = self.instance

    if self.op.os_type is not None:
      feedback_fn("Changing OS to '%s'..." % self.op.os_type)
      inst.os = self.op.os_type
      self.cfg.AddInstance(inst)

    _StartInstanceDisks(self.cfg, inst, None)
    try:
      feedback_fn("Running the instance OS create scripts...")
      if not rpc.call_instance_os_add(inst.primary_node, inst, "sda", "sdb"):
        raise errors.OpExecError("Could not install OS for instance %s"
                                 " on node %s" %
                                 (inst.name, inst.primary_node))
    finally:
      # always release the disks, even if the OS install failed
      _ShutdownInstanceDisks(inst, self.cfg)
2204 fe7b0351 Michael Hanselmann
2205 fe7b0351 Michael Hanselmann
2206 decd5f45 Iustin Pop
class LURenameInstance(LogicalUnit):
  """Rename an instance.

  """
  HPATH = "instance-rename"
  HTYPE = constants.HTYPE_INSTANCE
  _OP_REQP = ["instance_name", "new_name"]

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and secondary nodes of the instance.

    """
    env = _BuildInstanceHookEnvByObject(self.instance)
    env["INSTANCE_NEW_NAME"] = self.op.new_name
    nl = ([self.sstore.GetMasterNode(), self.instance.primary_node] +
          list(self.instance.secondary_nodes))
    return env, nl, nl

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster and is not running,
    and that the new name resolves and is not already taken by another
    instance or (unless ignore_ip is set) by a live IP address.

    """
    instance = self.cfg.GetInstanceInfo(
      self.cfg.ExpandInstanceName(self.op.instance_name))
    if instance is None:
      raise errors.OpPrereqError("Instance '%s' not known" %
                                 self.op.instance_name)
    if instance.status != "down":
      raise errors.OpPrereqError("Instance '%s' is marked to be up" %
                                 self.op.instance_name)
    # the config says "down", but double-check with the primary node in
    # case the instance is actually still running there
    remote_info = rpc.call_instance_info(instance.primary_node, instance.name)
    if remote_info:
      raise errors.OpPrereqError("Instance '%s' is running on the node %s" %
                                 (self.op.instance_name,
                                  instance.primary_node))
    self.instance = instance

    # new name verification
    name_info = utils.HostInfo(self.op.new_name)

    self.op.new_name = new_name = name_info.name
    instance_list = self.cfg.GetInstanceList()
    if new_name in instance_list:
      # FIX: this used to reference the undefined name 'instance_name',
      # raising NameError instead of the intended OpPrereqError
      raise errors.OpPrereqError("Instance '%s' is already in the cluster" %
                                 new_name)

    if not getattr(self.op, "ignore_ip", False):
      command = ["fping", "-q", name_info.ip]
      result = utils.RunCmd(command)
      # fping exits successfully iff the address answered, i.e. it's in use
      if not result.failed:
        raise errors.OpPrereqError("IP %s of instance %s already in use" %
                                   (name_info.ip, new_name))

  def Exec(self, feedback_fn):
    """Rename the instance.

    """
    inst = self.instance
    old_name = inst.name

    self.cfg.RenameInstance(inst.name, self.op.new_name)

    # re-read the instance from the configuration after rename
    inst = self.cfg.GetInstanceInfo(self.op.new_name)

    _StartInstanceDisks(self.cfg, inst, None)
    try:
      if not rpc.call_instance_run_rename(inst.primary_node, inst, old_name,
                                          "sda", "sdb"):
        # the configuration rename already happened, so only log an error
        # instead of failing the whole operation
        msg = ("Could not run OS rename script for instance %s on node %s"
               " (but the instance has been renamed in Ganeti)" %
               (inst.name, inst.primary_node))
        logger.Error(msg)
    finally:
      _ShutdownInstanceDisks(inst, self.cfg)
2286 decd5f45 Iustin Pop
2287 decd5f45 Iustin Pop
2288 a8083063 Iustin Pop
class LURemoveInstance(LogicalUnit):
  """Remove an instance.

  """
  HPATH = "instance-remove"
  HTYPE = constants.HTYPE_INSTANCE
  _OP_REQP = ["instance_name"]

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and secondary nodes of the instance.

    """
    env = _BuildInstanceHookEnvByObject(self.instance)
    # only the master runs the removal hooks
    nl = [self.sstore.GetMasterNode()]
    return env, nl, nl

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster.

    """
    expanded_name = self.cfg.ExpandInstanceName(self.op.instance_name)
    instance = self.cfg.GetInstanceInfo(expanded_name)
    if instance is None:
      raise errors.OpPrereqError("Instance '%s' not known" %
                                 self.op.instance_name)
    self.instance = instance

  def Exec(self, feedback_fn):
    """Remove the instance.

    Shuts the instance down, removes its block devices and finally drops
    it from the cluster configuration; individual failures can be turned
    into warnings via the ignore_failures option.

    """
    inst = self.instance
    pnode = inst.primary_node
    logger.Info("shutting down instance %s on node %s" %
                (inst.name, pnode))

    if not rpc.call_instance_shutdown(pnode, inst):
      if not self.op.ignore_failures:
        raise errors.OpExecError("Could not shutdown instance %s on node %s" %
                                 (inst.name, pnode))
      feedback_fn("Warning: can't shutdown instance")

    logger.Info("removing block devices for instance %s" % inst.name)

    if not _RemoveDisks(inst, self.cfg):
      if not self.op.ignore_failures:
        raise errors.OpExecError("Can't remove instance's disks")
      feedback_fn("Warning: can't remove instance's disks")

    logger.Info("removing instance %s out of cluster config" % inst.name)

    self.cfg.RemoveInstance(inst.name)
2345 a8083063 Iustin Pop
2346 a8083063 Iustin Pop
2347 a8083063 Iustin Pop
class LUQueryInstances(NoHooksLU):
  """Logical unit for querying instances.

  """
  _OP_REQP = ["output_fields", "names"]

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the fields required are valid output fields.

    """
    # fields that need a live RPC query against the nodes, as opposed to
    # the static ones that come straight from the configuration
    self.dynamic_fields = frozenset(["oper_state", "oper_ram", "status"])
    _CheckOutputFields(static=["name", "os", "pnode", "snodes",
                               "admin_state", "admin_ram",
                               "disk_template", "ip", "mac", "bridge",
                               "sda_size", "sdb_size", "vcpus"],
                       dynamic=self.dynamic_fields,
                       selected=self.op.output_fields)

    # resolve the requested names (or all instances) once, here
    self.wanted = _GetWantedInstances(self, self.op.names)

  def Exec(self, feedback_fn):
    """Computes the list of nodes and their attributes.

    Returns a list of rows, one per instance, each row holding the
    requested output fields in order.

    """
    instance_names = self.wanted
    instance_list = [self.cfg.GetInstanceInfo(iname) for iname
                     in instance_names]

    # begin data gathering

    nodes = frozenset([inst.primary_node for inst in instance_list])

    bad_nodes = []
    if self.dynamic_fields.intersection(self.op.output_fields):
      # at least one live field was requested: query every primary node
      live_data = {}
      node_data = rpc.call_all_instances_info(nodes)
      for name in nodes:
        result = node_data[name]
        if result:
          live_data.update(result)
        elif result == False:
          # an explicit False means the RPC to this node failed; other
          # falsy values just mean no instance is alive there
          bad_nodes.append(name)
        # else no instance is alive
    else:
      # no live fields: every instance gets an empty live-info dict
      live_data = dict([(name, {}) for name in instance_names])

    # end data gathering

    output = []
    for instance in instance_list:
      iout = []
      for field in self.op.output_fields:
        # static fields read from the configuration object
        if field == "name":
          val = instance.name
        elif field == "os":
          val = instance.os
        elif field == "pnode":
          val = instance.primary_node
        elif field == "snodes":
          val = list(instance.secondary_nodes)
        elif field == "admin_state":
          # True if the instance is configured to be up
          val = (instance.status != "down")
        elif field == "oper_state":
          # live state; None when the primary node could not be queried
          if instance.primary_node in bad_nodes:
            val = None
          else:
            val = bool(live_data.get(instance.name))
        elif field == "status":
          # combined admin+oper state, flagging mismatches as errors
          if instance.primary_node in bad_nodes:
            val = "ERROR_nodedown"
          else:
            running = bool(live_data.get(instance.name))
            if running:
              if instance.status != "down":
                val = "running"
              else:
                val = "ERROR_up"
            else:
              if instance.status != "down":
                val = "ERROR_down"
              else:
                val = "ADMIN_down"
        elif field == "admin_ram":
          val = instance.memory
        elif field == "oper_ram":
          # live memory usage; None for unreachable nodes, "-" when the
          # instance simply isn't running
          if instance.primary_node in bad_nodes:
            val = None
          elif instance.name in live_data:
            val = live_data[instance.name].get("memory", "?")
          else:
            val = "-"
        elif field == "disk_template":
          val = instance.disk_template
        elif field == "ip":
          val = instance.nics[0].ip
        elif field == "bridge":
          val = instance.nics[0].bridge
        elif field == "mac":
          val = instance.nics[0].mac
        elif field == "sda_size" or field == "sdb_size":
          # field[:3] is the disk name ("sda"/"sdb")
          disk = instance.FindDisk(field[:3])
          if disk is None:
            val = None
          else:
            val = disk.size
        elif field == "vcpus":
          val = instance.vcpus
        else:
          raise errors.ParameterError(field)
        iout.append(val)
      output.append(iout)

    return output
2462 a8083063 Iustin Pop
2463 a8083063 Iustin Pop
2464 a8083063 Iustin Pop
class LUFailoverInstance(LogicalUnit):
  """Failover an instance.

  """
  HPATH = "instance-failover"
  HTYPE = constants.HTYPE_INSTANCE
  _OP_REQP = ["instance_name", "ignore_consistency"]

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and secondary nodes of the instance.

    """
    env = {
      "IGNORE_CONSISTENCY": self.op.ignore_consistency,
      }
    env.update(_BuildInstanceHookEnvByObject(self.instance))
    nl = [self.sstore.GetMasterNode()] + list(self.instance.secondary_nodes)
    return env, nl, nl

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster.

    """
    instance = self.cfg.GetInstanceInfo(
      self.cfg.ExpandInstanceName(self.op.instance_name))
    if instance is None:
      raise errors.OpPrereqError("Instance '%s' not known" %
                                 self.op.instance_name)

    # failover only makes sense when the disks are mirrored over the net
    if instance.disk_template not in constants.DTS_NET_MIRROR:
      raise errors.OpPrereqError("Instance's disk layout is not"
                                 " network mirrored, cannot failover.")

    secondary_nodes = instance.secondary_nodes
    if not secondary_nodes:
      raise errors.ProgrammerError("no secondary node but using "
                                   "DT_REMOTE_RAID1 template")

    target_node = secondary_nodes[0]
    # check memory requirements on the secondary node
    _CheckNodeFreeMemory(self.cfg, target_node, "failing over instance %s" %
                         instance.name, instance.memory)

    # check bridge existance
    brlist = [nic.bridge for nic in instance.nics]
    if not rpc.call_bridges_exist(target_node, brlist):
      raise errors.OpPrereqError("One or more target bridges %s does not"
                                 " exist on destination node '%s'" %
                                 (brlist, target_node))

    self.instance = instance

  def Exec(self, feedback_fn):
    """Failover an instance.

    The failover is done by shutting it down on its present node and
    starting it on the secondary.

    """
    instance = self.instance

    source_node = instance.primary_node
    target_node = instance.secondary_nodes[0]

    # refuse to fail over onto degraded disks unless explicitly asked to
    feedback_fn("* checking disk consistency between source and target")
    for dev in instance.disks:
      # for remote_raid1, these are md over drbd
      if not _CheckDiskConsistency(self.cfg, dev, target_node, False):
        if not self.op.ignore_consistency:
          raise errors.OpExecError("Disk %s is degraded on target node,"
                                   " aborting failover." % dev.iv_name)

    feedback_fn("* shutting down instance on source node")
    logger.Info("Shutting down instance %s on node %s" %
                (instance.name, source_node))

    if not rpc.call_instance_shutdown(source_node, instance):
      # with ignore_consistency a failed shutdown (e.g. dead node) is only
      # logged; the operator must guarantee the node is really down
      if self.op.ignore_consistency:
        logger.Error("Could not shutdown instance %s on node %s. Proceeding"
                     " anyway. Please make sure node %s is down"  %
                     (instance.name, source_node, source_node))
      else:
        raise errors.OpExecError("Could not shutdown instance %s on node %s" %
                                 (instance.name, source_node))

    feedback_fn("* deactivating the instance's disks on source node")
    if not _ShutdownInstanceDisks(instance, self.cfg, ignore_primary=True):
      raise errors.OpExecError("Can't shut down the instance's disks.")

    # flip the primary node in the configuration before reactivating disks
    instance.primary_node = target_node
    # distribute new instance config to the other nodes
    self.cfg.AddInstance(instance)

    feedback_fn("* activating the instance's disks on target node")
    logger.Info("Starting instance %s on node %s" %
                (instance.name, target_node))

    disks_ok, dummy = _AssembleInstanceDisks(instance, self.cfg,
                                             ignore_secondaries=True)
    if not disks_ok:
      # roll back disk activation before aborting
      _ShutdownInstanceDisks(instance, self.cfg)
      raise errors.OpExecError("Can't activate the instance's disks")

    feedback_fn("* starting the instance on the target node")
    if not rpc.call_instance_start(target_node, instance, None):
      _ShutdownInstanceDisks(instance, self.cfg)
      raise errors.OpExecError("Could not start instance %s on node %s." %
                               (instance.name, target_node))
2576 a8083063 Iustin Pop
2577 a8083063 Iustin Pop
2578 3f78eef2 Iustin Pop
def _CreateBlockDevOnPrimary(cfg, node, instance, device, info):
  """Create a tree of block devices on the primary node.

  This always creates all devices.

  """
  # depth-first: every child must exist before its parent device,
  # aborting at the first failure
  for child in (device.children or []):
    if not _CreateBlockDevOnPrimary(cfg, node, instance, child, info):
      return False

  cfg.SetDiskID(device, node)
  new_id = rpc.call_blockdev_create(node, device, device.size,
                                    instance.name, True, info)
  if not new_id:
    return False
  # remember the backend id the first time the device is created
  if device.physical_id is None:
    device.physical_id = new_id
  return True
2597 a8083063 Iustin Pop
2598 a8083063 Iustin Pop
2599 3f78eef2 Iustin Pop
def _CreateBlockDevOnSecondary(cfg, node, instance, device, force, info):
  """Create a tree of block devices on a secondary node.

  If this device type has to be created on secondaries, create it and
  all its children.

  If not, just recurse to children keeping the same 'force' value.

  """
  # a device that must live on the secondary forces creation of itself
  # and of the whole subtree below it
  force = device.CreateOnSecondary() or force
  for child in (device.children or []):
    if not _CreateBlockDevOnSecondary(cfg, node, instance,
                                      child, force, info):
      return False

  if not force:
    # nothing to create at this level on the secondary
    return True
  cfg.SetDiskID(device, node)
  new_id = rpc.call_blockdev_create(node, device, device.size,
                                    instance.name, False, info)
  if not new_id:
    return False
  # remember the backend id the first time the device is created
  if device.physical_id is None:
    device.physical_id = new_id
  return True
2626 a8083063 Iustin Pop
2627 a8083063 Iustin Pop
2628 923b1523 Iustin Pop
def _GenerateUniqueNames(cfg, exts):
2629 923b1523 Iustin Pop
  """Generate a suitable LV name.
2630 923b1523 Iustin Pop

2631 923b1523 Iustin Pop
  This will generate a logical volume name for the given instance.
2632 923b1523 Iustin Pop

2633 923b1523 Iustin Pop
  """
2634 923b1523 Iustin Pop
  results = []
2635 923b1523 Iustin Pop
  for val in exts:
2636 923b1523 Iustin Pop
    new_id = cfg.GenerateUniqueID()
2637 923b1523 Iustin Pop
    results.append("%s%s" % (new_id, val))
2638 923b1523 Iustin Pop
  return results
2639 923b1523 Iustin Pop
2640 923b1523 Iustin Pop
2641 923b1523 Iustin Pop
def _GenerateMDDRBDBranch(cfg, primary, secondary, size, names):
  """Generate a drbd device complete with its children.

  """
  port = cfg.AllocatePort()
  vgname = cfg.GetVGName()
  # data LV sized as requested plus a fixed 128M metadata LV
  data_dev = objects.Disk(dev_type=constants.LD_LV, size=size,
                          logical_id=(vgname, names[0]))
  meta_dev = objects.Disk(dev_type=constants.LD_LV, size=128,
                          logical_id=(vgname, names[1]))
  return objects.Disk(dev_type=constants.LD_DRBD7, size=size,
                      logical_id=(primary, secondary, port),
                      children=[data_dev, meta_dev])
2655 a8083063 Iustin Pop
2656 a8083063 Iustin Pop
2657 a1f445d3 Iustin Pop
def _GenerateDRBD8Branch(cfg, primary, secondary, size, names, iv_name):
  """Generate a drbd8 device complete with its children.

  """
  port = cfg.AllocatePort()
  vgname = cfg.GetVGName()
  # data LV sized as requested plus a fixed 128M metadata LV
  data_dev = objects.Disk(dev_type=constants.LD_LV, size=size,
                          logical_id=(vgname, names[0]))
  meta_dev = objects.Disk(dev_type=constants.LD_LV, size=128,
                          logical_id=(vgname, names[1]))
  return objects.Disk(dev_type=constants.LD_DRBD8, size=size,
                      logical_id=(primary, secondary, port),
                      children=[data_dev, meta_dev],
                      iv_name=iv_name)
2672 a1f445d3 Iustin Pop
2673 7c0d6283 Michael Hanselmann
2674 923b1523 Iustin Pop
def _GenerateDiskTemplate(cfg, template_name,
                          instance_name, primary_node,
                          secondary_nodes, disk_sz, swap_sz):
  """Generate the entire disk layout for a given template type.

  Returns the list of top-level Disk objects (sda and sdb) for the
  requested template, or an empty list for the diskless template.

  """
  #TODO: compute space requirements

  vgname = cfg.GetVGName()

  if template_name == constants.DT_DISKLESS:
    # no block devices at all
    return []

  if template_name == constants.DT_PLAIN:
    # plain LVs live only on the primary node
    if len(secondary_nodes) != 0:
      raise errors.ProgrammerError("Wrong template configuration")

    names = _GenerateUniqueNames(cfg, [".sda", ".sdb"])
    sda = objects.Disk(dev_type=constants.LD_LV, size=disk_sz,
                       logical_id=(vgname, names[0]),
                       iv_name="sda")
    sdb = objects.Disk(dev_type=constants.LD_LV, size=swap_sz,
                       logical_id=(vgname, names[1]),
                       iv_name="sdb")
    return [sda, sdb]

  if template_name == constants.DT_LOCAL_RAID1:
    # two LV mirrors per disk, joined by a local md raid1
    if len(secondary_nodes) != 0:
      raise errors.ProgrammerError("Wrong template configuration")

    names = _GenerateUniqueNames(cfg, [".sda_m1", ".sda_m2",
                                       ".sdb_m1", ".sdb_m2"])
    sda_m1 = objects.Disk(dev_type=constants.LD_LV, size=disk_sz,
                          logical_id=(vgname, names[0]))
    sda_m2 = objects.Disk(dev_type=constants.LD_LV, size=disk_sz,
                          logical_id=(vgname, names[1]))
    md_sda = objects.Disk(dev_type=constants.LD_MD_R1, iv_name="sda",
                          size=disk_sz,
                          children=[sda_m1, sda_m2])
    sdb_m1 = objects.Disk(dev_type=constants.LD_LV, size=swap_sz,
                          logical_id=(vgname, names[2]))
    sdb_m2 = objects.Disk(dev_type=constants.LD_LV, size=swap_sz,
                          logical_id=(vgname, names[3]))
    md_sdb = objects.Disk(dev_type=constants.LD_MD_R1, iv_name="sdb",
                          size=swap_sz,
                          children=[sdb_m1, sdb_m2])
    return [md_sda, md_sdb]

  if template_name == constants.DT_REMOTE_RAID1:
    # md raid1 over a drbd7 device mirrored to the single secondary
    if len(secondary_nodes) != 1:
      raise errors.ProgrammerError("Wrong template configuration")
    remote_node = secondary_nodes[0]
    names = _GenerateUniqueNames(cfg, [".sda_data", ".sda_meta",
                                       ".sdb_data", ".sdb_meta"])
    drbd_sda = _GenerateMDDRBDBranch(cfg, primary_node, remote_node,
                                     disk_sz, names[0:2])
    md_sda = objects.Disk(dev_type=constants.LD_MD_R1, iv_name="sda",
                          children=[drbd_sda], size=disk_sz)
    drbd_sdb = _GenerateMDDRBDBranch(cfg, primary_node, remote_node,
                                     swap_sz, names[2:4])
    md_sdb = objects.Disk(dev_type=constants.LD_MD_R1, iv_name="sdb",
                          children=[drbd_sdb], size=swap_sz)
    return [md_sda, md_sdb]

  if template_name == constants.DT_DRBD8:
    # plain drbd8 devices mirrored to the single secondary, no md layer
    if len(secondary_nodes) != 1:
      raise errors.ProgrammerError("Wrong template configuration")
    remote_node = secondary_nodes[0]
    names = _GenerateUniqueNames(cfg, [".sda_data", ".sda_meta",
                                       ".sdb_data", ".sdb_meta"])
    drbd_sda = _GenerateDRBD8Branch(cfg, primary_node, remote_node,
                                    disk_sz, names[0:2], "sda")
    drbd_sdb = _GenerateDRBD8Branch(cfg, primary_node, remote_node,
                                    swap_sz, names[2:4], "sdb")
    return [drbd_sda, drbd_sdb]

  raise errors.ProgrammerError("Invalid disk template '%s'" % template_name)
2748 a8083063 Iustin Pop
2749 a8083063 Iustin Pop
2750 a0c3fea1 Michael Hanselmann
def _GetInstanceInfoText(instance):
2751 3ecf6786 Iustin Pop
  """Compute that text that should be added to the disk's metadata.
2752 3ecf6786 Iustin Pop

2753 3ecf6786 Iustin Pop
  """
2754 a0c3fea1 Michael Hanselmann
  return "originstname+%s" % instance.name
2755 a0c3fea1 Michael Hanselmann
2756 a0c3fea1 Michael Hanselmann
2757 a8083063 Iustin Pop
def _CreateDisks(cfg, instance):
  """Create all disks for an instance.

  This abstracts away some work from AddInstance.

  Args:
    instance: the instance object

  Returns:
    True or False showing the success of the creation process

  """
  info = _GetInstanceInfoText(instance)

  for disk in instance.disks:
    logger.Info("creating volume %s for instance %s" %
              (disk.iv_name, instance.name))
    # secondaries first, so the primary can attach to them afterwards
    #HARDCODE
    for snode in instance.secondary_nodes:
      created = _CreateBlockDevOnSecondary(cfg, snode, instance,
                                           disk, False, info)
      if not created:
        logger.Error("failed to create volume %s (%s) on secondary node %s!" %
                     (disk.iv_name, disk, snode))
        return False
    #HARDCODE
    created = _CreateBlockDevOnPrimary(cfg, instance.primary_node,
                                       instance, disk, info)
    if not created:
      logger.Error("failed to create volume %s on primary!" %
                   disk.iv_name)
      return False
  return True
2788 a8083063 Iustin Pop
2789 a8083063 Iustin Pop
2790 a8083063 Iustin Pop
def _RemoveDisks(instance, cfg):
  """Remove all of an instance's disks.

  Counterpart of `_CreateDisks()`, used by `AddInstance()` and
  `RemoveInstance()`. Removal is best-effort: if some devices cannot
  be removed, the remaining ones are still attempted.

  Args:
    instance: the instance object

  Returns:
    True if every device was removed, False otherwise

  """
  logger.Info("removing block devices for instance %s" % instance.name)

  all_removed = True
  for device in instance.disks:
    for node, disk in device.ComputeNodeTree(instance.primary_node):
      # point the config at the per-node physical ID before the RPC
      cfg.SetDiskID(disk, node)
      if rpc.call_blockdev_remove(node, disk):
        continue
      logger.Error("could not remove block device %s on node %s,"
                   " continuing anyway" %
                   (device.iv_name, node))
      all_removed = False
  return all_removed
2817 a8083063 Iustin Pop
2818 a8083063 Iustin Pop
2819 a8083063 Iustin Pop
class LUCreateInstance(LogicalUnit):
  """Create an instance.

  Validates all parameters in CheckPrereq (mode, nodes, disk template,
  free space, OS, hostname/IP, MAC, bridge, boot order) and performs the
  actual creation in Exec: generate disks, register the instance in the
  configuration, wait for mirror sync, run the OS create/import scripts
  and optionally start the instance.

  """
  HPATH = "instance-add"
  HTYPE = constants.HTYPE_INSTANCE
  _OP_REQP = ["instance_name", "mem_size", "disk_size", "pnode",
              "disk_template", "swap_size", "mode", "start", "vcpus",
              "wait_for_sync", "ip_check", "mac"]

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and secondary nodes of the instance.

    """
    env = {
      "INSTANCE_DISK_TEMPLATE": self.op.disk_template,
      "INSTANCE_DISK_SIZE": self.op.disk_size,
      "INSTANCE_SWAP_SIZE": self.op.swap_size,
      "INSTANCE_ADD_MODE": self.op.mode,
      }
    if self.op.mode == constants.INSTANCE_IMPORT:
      env["INSTANCE_SRC_NODE"] = self.op.src_node
      env["INSTANCE_SRC_PATH"] = self.op.src_path
      env["INSTANCE_SRC_IMAGE"] = self.src_image

    env.update(_BuildInstanceHookEnv(name=self.op.instance_name,
      primary_node=self.op.pnode,
      secondary_nodes=self.secondaries,
      status=self.instance_status,
      os_type=self.op.os_type,
      memory=self.op.mem_size,
      vcpus=self.op.vcpus,
      nics=[(self.inst_ip, self.op.bridge, self.op.mac)],
    ))

    nl = ([self.sstore.GetMasterNode(), self.op.pnode] +
          self.secondaries)
    return env, nl, nl

  def CheckPrereq(self):
    """Check prerequisites.

    Raises errors.OpPrereqError if any parameter is invalid and sets up
    self.pnode, self.secondaries, self.inst_ip, self.instance_status and
    (for imports) self.src_image for use by BuildHooksEnv/Exec.

    """
    # optional opcode attributes default to None
    for attr in ["kernel_path", "initrd_path", "hvm_boot_order"]:
      if not hasattr(self.op, attr):
        setattr(self.op, attr, None)

    if self.op.mode not in (constants.INSTANCE_CREATE,
                            constants.INSTANCE_IMPORT):
      raise errors.OpPrereqError("Invalid instance creation mode '%s'" %
                                 self.op.mode)

    if self.op.mode == constants.INSTANCE_IMPORT:
      src_node = getattr(self.op, "src_node", None)
      src_path = getattr(self.op, "src_path", None)
      if src_node is None or src_path is None:
        raise errors.OpPrereqError("Importing an instance requires source"
                                   " node and path options")
      src_node_full = self.cfg.ExpandNodeName(src_node)
      if src_node_full is None:
        raise errors.OpPrereqError("Unknown source node '%s'" % src_node)
      self.op.src_node = src_node = src_node_full

      if not os.path.isabs(src_path):
        raise errors.OpPrereqError("The source path must be absolute")

      export_info = rpc.call_export_info(src_node, src_path)

      if not export_info:
        raise errors.OpPrereqError("No export found in dir %s" % src_path)

      if not export_info.has_section(constants.INISECT_EXP):
        raise errors.ProgrammerError("Corrupted export config")

      ei_version = export_info.get(constants.INISECT_EXP, 'version')
      if (int(ei_version) != constants.EXPORT_VERSION):
        raise errors.OpPrereqError("Wrong export version %s (wanted %d)" %
                                   (ei_version, constants.EXPORT_VERSION))

      if int(export_info.get(constants.INISECT_INS, 'disk_count')) > 1:
        raise errors.OpPrereqError("Can't import instance with more than"
                                   " one data disk")

      # FIXME: are the old os-es, disk sizes, etc. useful?
      self.op.os_type = export_info.get(constants.INISECT_EXP, 'os')
      diskimage = os.path.join(src_path, export_info.get(constants.INISECT_INS,
                                                         'disk0_dump'))
      self.src_image = diskimage
    else: # INSTANCE_CREATE
      if getattr(self.op, "os_type", None) is None:
        raise errors.OpPrereqError("No guest OS specified")

    # check primary node
    pnode = self.cfg.GetNodeInfo(self.cfg.ExpandNodeName(self.op.pnode))
    if pnode is None:
      raise errors.OpPrereqError("Primary node '%s' is unknown" %
                                 self.op.pnode)
    self.op.pnode = pnode.name
    self.pnode = pnode
    self.secondaries = []
    # disk template and mirror node verification
    if self.op.disk_template not in constants.DISK_TEMPLATES:
      raise errors.OpPrereqError("Invalid disk template name")

    if self.op.disk_template in constants.DTS_NET_MIRROR:
      if getattr(self.op, "snode", None) is None:
        raise errors.OpPrereqError("The networked disk templates need"
                                   " a mirror node")

      snode_name = self.cfg.ExpandNodeName(self.op.snode)
      if snode_name is None:
        raise errors.OpPrereqError("Unknown secondary node '%s'" %
                                   self.op.snode)
      elif snode_name == pnode.name:
        raise errors.OpPrereqError("The secondary node cannot be"
                                   " the primary node.")
      self.secondaries.append(snode_name)

    # Required free disk space as a function of disk and swap space
    req_size_dict = {
      constants.DT_DISKLESS: None,
      constants.DT_PLAIN: self.op.disk_size + self.op.swap_size,
      constants.DT_LOCAL_RAID1: (self.op.disk_size + self.op.swap_size) * 2,
      # 256 MB are added for drbd metadata, 128MB for each drbd device
      constants.DT_REMOTE_RAID1: self.op.disk_size + self.op.swap_size + 256,
      constants.DT_DRBD8: self.op.disk_size + self.op.swap_size + 256,
    }

    if self.op.disk_template not in req_size_dict:
      raise errors.ProgrammerError("Disk template '%s' size requirement"
                                   " is unknown" % self.op.disk_template)

    req_size = req_size_dict[self.op.disk_template]

    # Check lv size requirements
    if req_size is not None:
      nodenames = [pnode.name] + self.secondaries
      nodeinfo = rpc.call_node_info(nodenames, self.cfg.GetVGName())
      for node in nodenames:
        info = nodeinfo.get(node, None)
        if not info:
          # report the failing node's name, not the whole RPC result dict
          raise errors.OpPrereqError("Cannot get current information"
                                     " from node '%s'" % node)
        vg_free = info.get('vg_free', None)
        if not isinstance(vg_free, int):
          raise errors.OpPrereqError("Can't compute free disk space on"
                                     " node %s" % node)
        if req_size > info['vg_free']:
          raise errors.OpPrereqError("Not enough disk space on target node %s."
                                     " %d MB available, %d MB required" %
                                     (node, info['vg_free'], req_size))

    # os verification
    os_obj = rpc.call_os_get(pnode.name, self.op.os_type)
    if not os_obj:
      raise errors.OpPrereqError("OS '%s' not in supported os list for"
                                 " primary node" % self.op.os_type)

    if self.op.kernel_path == constants.VALUE_NONE:
      raise errors.OpPrereqError("Can't set instance kernel to none")

    # instance verification
    hostname1 = utils.HostInfo(self.op.instance_name)

    self.op.instance_name = instance_name = hostname1.name
    instance_list = self.cfg.GetInstanceList()
    if instance_name in instance_list:
      raise errors.OpPrereqError("Instance '%s' is already in the cluster" %
                                 instance_name)

    ip = getattr(self.op, "ip", None)
    if ip is None or ip.lower() == "none":
      inst_ip = None
    elif ip.lower() == "auto":
      inst_ip = hostname1.ip
    else:
      if not utils.IsValidIP(ip):
        raise errors.OpPrereqError("given IP address '%s' doesn't look"
                                   " like a valid IP" % ip)
      inst_ip = ip
    self.inst_ip = inst_ip

    if self.op.start and not self.op.ip_check:
      raise errors.OpPrereqError("Cannot ignore IP address conflicts when"
                                 " adding an instance in start mode")

    if self.op.ip_check:
      # a reachable IP means the name is already taken by a live host
      if utils.TcpPing(hostname1.ip, constants.DEFAULT_NODED_PORT):
        raise errors.OpPrereqError("IP %s of instance %s already in use" %
                                   (hostname1.ip, instance_name))

    # MAC address verification
    if self.op.mac != "auto":
      if not utils.IsValidMac(self.op.mac.lower()):
        raise errors.OpPrereqError("invalid MAC address specified: %s" %
                                   self.op.mac)

    # bridge verification
    bridge = getattr(self.op, "bridge", None)
    if bridge is None:
      self.op.bridge = self.cfg.GetDefBridge()
    else:
      self.op.bridge = bridge

    if not rpc.call_bridges_exist(self.pnode.name, [self.op.bridge]):
      raise errors.OpPrereqError("target bridge '%s' does not exist on"
                                 " destination node '%s'" %
                                 (self.op.bridge, pnode.name))

    # boot order verification
    if self.op.hvm_boot_order is not None:
      # stripping all valid chars must leave nothing behind
      if len(self.op.hvm_boot_order.strip("acdn")) != 0:
        raise errors.OpPrereqError("invalid boot order specified,"
                                   " must be one or more of [acdn]")

    if self.op.start:
      self.instance_status = 'up'
    else:
      self.instance_status = 'down'

  def Exec(self, feedback_fn):
    """Create and add the instance to the cluster.

    """
    instance = self.op.instance_name
    pnode_name = self.pnode.name

    if self.op.mac == "auto":
      mac_address = self.cfg.GenerateMAC()
    else:
      mac_address = self.op.mac

    nic = objects.NIC(bridge=self.op.bridge, mac=mac_address)
    if self.inst_ip is not None:
      nic.ip = self.inst_ip

    # some hypervisors need a network port reserved (e.g. for VNC)
    ht_kind = self.sstore.GetHypervisorType()
    if ht_kind in constants.HTS_REQ_PORT:
      network_port = self.cfg.AllocatePort()
    else:
      network_port = None

    disks = _GenerateDiskTemplate(self.cfg,
                                  self.op.disk_template,
                                  instance, pnode_name,
                                  self.secondaries, self.op.disk_size,
                                  self.op.swap_size)

    iobj = objects.Instance(name=instance, os=self.op.os_type,
                            primary_node=pnode_name,
                            memory=self.op.mem_size,
                            vcpus=self.op.vcpus,
                            nics=[nic], disks=disks,
                            disk_template=self.op.disk_template,
                            status=self.instance_status,
                            network_port=network_port,
                            kernel_path=self.op.kernel_path,
                            initrd_path=self.op.initrd_path,
                            hvm_boot_order=self.op.hvm_boot_order,
                            )

    feedback_fn("* creating instance disks...")
    if not _CreateDisks(self.cfg, iobj):
      # roll back any volumes that were created before the failure
      _RemoveDisks(iobj, self.cfg)
      raise errors.OpExecError("Device creation failed, reverting...")

    feedback_fn("adding instance %s to cluster config" % instance)

    self.cfg.AddInstance(iobj)

    if self.op.wait_for_sync:
      disk_abort = not _WaitForSync(self.cfg, iobj, self.proc)
    elif iobj.disk_template in constants.DTS_NET_MIRROR:
      # make sure the disks are not degraded (still sync-ing is ok)
      time.sleep(15)
      feedback_fn("* checking mirrors status")
      disk_abort = not _WaitForSync(self.cfg, iobj, self.proc, oneshot=True)
    else:
      disk_abort = False

    if disk_abort:
      _RemoveDisks(iobj, self.cfg)
      self.cfg.RemoveInstance(iobj.name)
      raise errors.OpExecError("There are some degraded disks for"
                               " this instance")

    feedback_fn("creating os for instance %s on node %s" %
                (instance, pnode_name))

    if iobj.disk_template != constants.DT_DISKLESS:
      if self.op.mode == constants.INSTANCE_CREATE:
        feedback_fn("* running the instance OS create scripts...")
        if not rpc.call_instance_os_add(pnode_name, iobj, "sda", "sdb"):
          raise errors.OpExecError("could not add os for instance %s"
                                   " on node %s" %
                                   (instance, pnode_name))

      elif self.op.mode == constants.INSTANCE_IMPORT:
        feedback_fn("* running the instance OS import scripts...")
        src_node = self.op.src_node
        src_image = self.src_image
        if not rpc.call_instance_os_import(pnode_name, iobj, "sda", "sdb",
                                           src_node, src_image):
          raise errors.OpExecError("Could not import os for instance"
                                   " %s on node %s" %
                                   (instance, pnode_name))
      else:
        # also checked in the prereq part
        raise errors.ProgrammerError("Unknown OS initialization mode '%s'"
                                     % self.op.mode)

    if self.op.start:
      logger.Info("starting instance %s on node %s" % (instance, pnode_name))
      feedback_fn("* starting instance...")
      if not rpc.call_instance_start(pnode_name, iobj, None):
        raise errors.OpExecError("Could not start instance")
3138 a8083063 Iustin Pop
3139 a8083063 Iustin Pop
3140 a8083063 Iustin Pop
class LUConnectConsole(NoHooksLU):
  """Connect to an instance's console.

  This is somewhat special in that it returns the command line that
  you need to run on the master node in order to connect to the
  console.

  """
  _OP_REQP = ["instance_name"]

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster.

    """
    full_name = self.cfg.ExpandInstanceName(self.op.instance_name)
    inst = self.cfg.GetInstanceInfo(full_name)
    if inst is None:
      raise errors.OpPrereqError("Instance '%s' not known" %
                                 self.op.instance_name)
    self.instance = inst

  def Exec(self, feedback_fn):
    """Connect to the console of an instance

    """
    inst = self.instance
    pnode = inst.primary_node

    # ask the primary node which instances it is actually running
    running_insts = rpc.call_instance_list([pnode])[pnode]
    if running_insts is False:
      raise errors.OpExecError("Can't connect to node %s." % pnode)

    if inst.name not in running_insts:
      raise errors.OpExecError("Instance %s is not running." % inst.name)

    logger.Debug("connecting to console of %s on %s" % (inst.name, pnode))

    console_cmd = hypervisor.GetHypervisor().GetShellCommandForConsole(inst)

    # build ssh cmdline
    cmd = self.ssh.BuildCmd(pnode, "root", console_cmd, batch=True, tty=True)
    return cmd[0], cmd
3185 a8083063 Iustin Pop
3186 a8083063 Iustin Pop
3187 a8083063 Iustin Pop
class LUAddMDDRBDComponent(LogicalUnit):
  """Add a new mirror member to an instance's disk.

  Only valid for instances using the remote_raid1 (md over drbd) disk
  layout: creates a new drbd device on the given remote node plus the
  primary, and attaches it as an additional child of the md device.

  """
  HPATH = "mirror-add"
  HTYPE = constants.HTYPE_INSTANCE
  _OP_REQP = ["instance_name", "remote_node", "disk_name"]

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on the master, the primary and all the secondaries.

    """
    env = {
      "NEW_SECONDARY": self.op.remote_node,
      "DISK_NAME": self.op.disk_name,
      }
    env.update(_BuildInstanceHookEnvByObject(self.instance))
    nl = [self.sstore.GetMasterNode(), self.instance.primary_node,
          self.op.remote_node,] + list(self.instance.secondary_nodes)
    return env, nl, nl

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster, that the target
    node exists and is not the primary, that the disk layout is
    remote_raid1, that the named disk exists and does not already have
    two mirror members. Sets self.instance, self.remote_node and
    self.disk for Exec.

    """
    instance = self.cfg.GetInstanceInfo(
      self.cfg.ExpandInstanceName(self.op.instance_name))
    if instance is None:
      raise errors.OpPrereqError("Instance '%s' not known" %
                                 self.op.instance_name)
    self.instance = instance

    remote_node = self.cfg.ExpandNodeName(self.op.remote_node)
    if remote_node is None:
      raise errors.OpPrereqError("Node '%s' not known" % self.op.remote_node)
    self.remote_node = remote_node

    if remote_node == instance.primary_node:
      raise errors.OpPrereqError("The specified node is the primary node of"
                                 " the instance.")

    if instance.disk_template != constants.DT_REMOTE_RAID1:
      raise errors.OpPrereqError("Instance's disk layout is not"
                                 " remote_raid1.")
    # for/else: the else branch only runs when no disk matched
    for disk in instance.disks:
      if disk.iv_name == self.op.disk_name:
        break
    else:
      raise errors.OpPrereqError("Can't find this device ('%s') in the"
                                 " instance." % self.op.disk_name)
    if len(disk.children) > 1:
      raise errors.OpPrereqError("The device already has two slave devices."
                                 " This would create a 3-disk raid1 which we"
                                 " don't allow.")
    self.disk = disk

  def Exec(self, feedback_fn):
    """Add the mirror component.

    Creates the new drbd branch on the secondary first, then the
    primary, then asks the primary to attach it to the md device;
    each failure path rolls back the devices created so far.

    """
    disk = self.disk
    instance = self.instance

    remote_node = self.remote_node
    # e.g. disk "sda" -> [".sda_data", ".sda_meta"] LV names
    lv_names = [".%s_%s" % (disk.iv_name, suf) for suf in ["data", "meta"]]
    names = _GenerateUniqueNames(self.cfg, lv_names)
    new_drbd = _GenerateMDDRBDBranch(self.cfg, instance.primary_node,
                                     remote_node, disk.size, names)

    logger.Info("adding new mirror component on secondary")
    #HARDCODE
    if not _CreateBlockDevOnSecondary(self.cfg, remote_node, instance,
                                      new_drbd, False,
                                      _GetInstanceInfoText(instance)):
      raise errors.OpExecError("Failed to create new component on secondary"
                               " node %s" % remote_node)

    logger.Info("adding new mirror component on primary")
    #HARDCODE
    if not _CreateBlockDevOnPrimary(self.cfg, instance.primary_node,
                                    instance, new_drbd,
                                    _GetInstanceInfoText(instance)):
      # remove secondary dev
      self.cfg.SetDiskID(new_drbd, remote_node)
      rpc.call_blockdev_remove(remote_node, new_drbd)
      raise errors.OpExecError("Failed to create volume on primary")

    # the device exists now
    # call the primary node to add the mirror to md
    logger.Info("adding new mirror component to md")
    if not rpc.call_blockdev_addchildren(instance.primary_node,
                                         disk, [new_drbd]):
      logger.Error("Can't add mirror compoment to md!")
      # roll back: remove the new device from both nodes, best-effort
      self.cfg.SetDiskID(new_drbd, remote_node)
      if not rpc.call_blockdev_remove(remote_node, new_drbd):
        logger.Error("Can't rollback on secondary")
      self.cfg.SetDiskID(new_drbd, instance.primary_node)
      if not rpc.call_blockdev_remove(instance.primary_node, new_drbd):
        logger.Error("Can't rollback on primary")
      raise errors.OpExecError("Can't add mirror component to md array")

    # record the new child in the configuration before syncing
    disk.children.append(new_drbd)

    self.cfg.AddInstance(instance)

    _WaitForSync(self.cfg, instance, self.proc)

    return 0
3299 a8083063 Iustin Pop
3300 a8083063 Iustin Pop
3301 a8083063 Iustin Pop
class LURemoveMDDRBDComponent(LogicalUnit):
  """Remove a component from a remote_raid1 disk.

  Detaches one DRBD7 child (identified by disk name and DRBD port) from
  an instance's MD mirror and removes its block devices from both nodes.

  """
  HPATH = "mirror-remove"
  HTYPE = constants.HTYPE_INSTANCE
  # opcode parameters that must be present on self.op
  _OP_REQP = ["instance_name", "disk_name", "disk_id"]

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on the master, the primary and all the secondaries.

    """
    env = {
      "DISK_NAME": self.op.disk_name,
      "DISK_ID": self.op.disk_id,
      # self.old_secondary is computed by CheckPrereq, which hooks rely on
      # having run first
      "OLD_SECONDARY": self.old_secondary,
      }
    env.update(_BuildInstanceHookEnvByObject(self.instance))
    nl = [self.sstore.GetMasterNode(),
          self.instance.primary_node] + list(self.instance.secondary_nodes)
    # same node list is used for both pre- and post-hooks
    return env, nl, nl

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster.

    Sets self.instance, self.disk, self.child and self.old_secondary for
    use by BuildHooksEnv and Exec.

    """
    instance = self.cfg.GetInstanceInfo(
      self.cfg.ExpandInstanceName(self.op.instance_name))
    if instance is None:
      raise errors.OpPrereqError("Instance '%s' not known" %
                                 self.op.instance_name)
    self.instance = instance

    if instance.disk_template != constants.DT_REMOTE_RAID1:
      raise errors.OpPrereqError("Instance's disk layout is not"
                                 " remote_raid1.")
    # find the named disk; relies on Python's for/else and on the loop
    # variable 'disk' remaining bound after the break
    for disk in instance.disks:
      if disk.iv_name == self.op.disk_name:
        break
    else:
      raise errors.OpPrereqError("Can't find this device ('%s') in the"
                                 " instance." % self.op.disk_name)
    # find the DRBD7 child whose port (logical_id[2]) matches disk_id
    for child in disk.children:
      if (child.dev_type == constants.LD_DRBD7 and
          child.logical_id[2] == self.op.disk_id):
        break
    else:
      raise errors.OpPrereqError("Can't find the device with this port.")

    if len(disk.children) < 2:
      raise errors.OpPrereqError("Cannot remove the last component from"
                                 " a mirror.")
    self.disk = disk
    self.child = child
    # logical_id[0:2] are the two node names; pick the one that is not
    # the primary as the old secondary
    if self.child.logical_id[0] == instance.primary_node:
      oid = 1
    else:
      oid = 0
    self.old_secondary = self.child.logical_id[oid]

  def Exec(self, feedback_fn):
    """Remove the mirror component

    """
    instance = self.instance
    disk = self.disk
    child = self.child
    logger.Info("remove mirror component")
    self.cfg.SetDiskID(disk, instance.primary_node)
    # detach the child from the MD array first; abort if that fails
    if not rpc.call_blockdev_removechildren(instance.primary_node,
                                            disk, [child]):
      raise errors.OpExecError("Can't remove child from mirror.")

    # then remove the child's devices from both nodes; failures here are
    # only logged (best-effort cleanup), the operation continues
    for node in child.logical_id[:2]:
      self.cfg.SetDiskID(child, node)
      if not rpc.call_blockdev_remove(node, child):
        logger.Error("Warning: failed to remove device from node %s,"
                     " continuing operation." % node)

    # finally drop the child from the configuration and persist it
    disk.children.remove(child)
    self.cfg.AddInstance(instance)
class LUReplaceDisks(LogicalUnit):
3389 a8083063 Iustin Pop
  """Replace the disks of an instance.
3390 a8083063 Iustin Pop

3391 a8083063 Iustin Pop
  """
3392 a8083063 Iustin Pop
  HPATH = "mirrors-replace"
3393 a8083063 Iustin Pop
  HTYPE = constants.HTYPE_INSTANCE
3394 a9e0c397 Iustin Pop
  _OP_REQP = ["instance_name", "mode", "disks"]
3395 a8083063 Iustin Pop
3396 a8083063 Iustin Pop
  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on the master, the primary and all the secondaries.

    """
    env = {
      "MODE": self.op.mode,
      "NEW_SECONDARY": self.op.remote_node,
      "OLD_SECONDARY": self.instance.secondary_nodes[0],
      }
    env.update(_BuildInstanceHookEnvByObject(self.instance))
    # hooks run on the master and the primary; the new secondary is
    # appended only when one was actually requested
    node_list = [self.sstore.GetMasterNode(), self.instance.primary_node]
    if self.op.remote_node is not None:
      node_list.append(self.op.remote_node)
    return env, node_list, node_list
  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster.

    Normalizes self.op.mode / self.op.remote_node and sets
    self.instance, self.sec_node, self.remote_node_info and (for drbd8)
    self.tgt_node / self.oth_node / self.new_node for use by Exec.

    """
    instance = self.cfg.GetInstanceInfo(
      self.cfg.ExpandInstanceName(self.op.instance_name))
    if instance is None:
      raise errors.OpPrereqError("Instance '%s' not known" %
                                 self.op.instance_name)
    self.instance = instance
    # canonicalize the (possibly abbreviated) name on the opcode
    self.op.instance_name = instance.name

    if instance.disk_template not in constants.DTS_NET_MIRROR:
      raise errors.OpPrereqError("Instance's disk layout is not"
                                 " network mirrored.")

    if len(instance.secondary_nodes) != 1:
      raise errors.OpPrereqError("The instance has a strange layout,"
                                 " expected one secondary but found %d" %
                                 len(instance.secondary_nodes))

    self.sec_node = instance.secondary_nodes[0]

    # remote_node is optional on the opcode, hence getattr
    remote_node = getattr(self.op, "remote_node", None)
    if remote_node is not None:
      remote_node = self.cfg.ExpandNodeName(remote_node)
      if remote_node is None:
        raise errors.OpPrereqError("Node '%s' not known" %
                                   self.op.remote_node)
      self.remote_node_info = self.cfg.GetNodeInfo(remote_node)
    else:
      self.remote_node_info = None
    if remote_node == instance.primary_node:
      raise errors.OpPrereqError("The specified node is the primary node of"
                                 " the instance.")
    elif remote_node == self.sec_node:
      if self.op.mode == constants.REPLACE_DISK_SEC:
        # this is for DRBD8, where we can't execute the same mode of
        # replacement as for drbd7 (no different port allocated)
        raise errors.OpPrereqError("Same secondary given, cannot execute"
                                   " replacement")
      # the user gave the current secondary, switch to
      # 'no-replace-secondary' mode for drbd7
      remote_node = None
    if (instance.disk_template == constants.DT_REMOTE_RAID1 and
        self.op.mode != constants.REPLACE_DISK_ALL):
      raise errors.OpPrereqError("Template 'remote_raid1' only allows all"
                                 " disks replacement, not individual ones")
    if instance.disk_template == constants.DT_DRBD8:
      if (self.op.mode == constants.REPLACE_DISK_ALL and
          remote_node is not None):
        # switch to replace secondary mode
        self.op.mode = constants.REPLACE_DISK_SEC

      if self.op.mode == constants.REPLACE_DISK_ALL:
        raise errors.OpPrereqError("Template 'drbd' only allows primary or"
                                   " secondary disk replacement, not"
                                   " both at once")
      elif self.op.mode == constants.REPLACE_DISK_PRI:
        if remote_node is not None:
          raise errors.OpPrereqError("Template 'drbd' does not allow changing"
                                     " the secondary while doing a primary"
                                     " node disk replacement")
        # tgt_node is where storage is replaced, oth_node is the peer
        self.tgt_node = instance.primary_node
        self.oth_node = instance.secondary_nodes[0]
      elif self.op.mode == constants.REPLACE_DISK_SEC:
        self.new_node = remote_node # this can be None, in which case
                                    # we don't change the secondary
        self.tgt_node = instance.secondary_nodes[0]
        self.oth_node = instance.primary_node
      else:
        raise errors.ProgrammerError("Unhandled disk replace mode")

    # validate every requested disk name against the instance
    for name in self.op.disks:
      if instance.FindDisk(name) is None:
        raise errors.OpPrereqError("Disk '%s' not found for instance '%s'" %
                                   (name, instance.name))
    # store the normalized value back on the opcode for Exec/hooks
    self.op.remote_node = remote_node
  def _ExecRR1(self, feedback_fn):
    """Replace the disks of an instance (remote_raid1 template).

    For each instance disk: generate a new DRBD branch towards the
    (possibly new) secondary, create it on both nodes, attach it to the
    MD array, wait for sync, then detach and remove the old component.

    Args:
      feedback_fn: feedback callback (unused here)

    Raises:
      errors.OpExecError: if creating or attaching a new component
        fails; rollback is only partial, so the messages request manual
        cleanup

    """
    instance = self.instance
    iv_names = {}
    # start of work
    if self.op.remote_node is None:
      remote_node = self.sec_node
    else:
      remote_node = self.op.remote_node
    cfg = self.cfg
    for dev in instance.disks:
      size = dev.size
      lv_names = [".%s_%s" % (dev.iv_name, suf) for suf in ["data", "meta"]]
      names = _GenerateUniqueNames(cfg, lv_names)
      new_drbd = _GenerateMDDRBDBranch(cfg, instance.primary_node,
                                       remote_node, size, names)
      # remember (md device, old child, new child) for the later phases
      iv_names[dev.iv_name] = (dev, dev.children[0], new_drbd)
      logger.Info("adding new mirror component on secondary for %s" %
                  dev.iv_name)
      #HARDCODE
      if not _CreateBlockDevOnSecondary(cfg, remote_node, instance,
                                        new_drbd, False,
                                        _GetInstanceInfoText(instance)):
        raise errors.OpExecError("Failed to create new component on secondary"
                                 " node %s. Full abort, cleanup manually!" %
                                 remote_node)

      logger.Info("adding new mirror component on primary")
      #HARDCODE
      if not _CreateBlockDevOnPrimary(cfg, instance.primary_node,
                                      instance, new_drbd,
                                      _GetInstanceInfoText(instance)):
        # remove secondary dev
        cfg.SetDiskID(new_drbd, remote_node)
        rpc.call_blockdev_remove(remote_node, new_drbd)
        raise errors.OpExecError("Failed to create volume on primary!"
                                 " Full abort, cleanup manually!!")

      # the device exists now
      # call the primary node to add the mirror to md
      logger.Info("adding new mirror component to md")
      if not rpc.call_blockdev_addchildren(instance.primary_node, dev,
                                           [new_drbd]):
        # FIX: message used to read "compoment"
        logger.Error("Can't add mirror component to md!")
        # best-effort rollback on both nodes before aborting
        cfg.SetDiskID(new_drbd, remote_node)
        if not rpc.call_blockdev_remove(remote_node, new_drbd):
          logger.Error("Can't rollback on secondary")
        cfg.SetDiskID(new_drbd, instance.primary_node)
        if not rpc.call_blockdev_remove(instance.primary_node, new_drbd):
          logger.Error("Can't rollback on primary")
        raise errors.OpExecError("Full abort, cleanup manually!!")

      dev.children.append(new_drbd)
      cfg.AddInstance(instance)

    # this can fail as the old devices are degraded and _WaitForSync
    # does a combined result over all disks, so we don't check its
    # return value
    _WaitForSync(cfg, instance, self.proc, unlock=True)

    # so check manually all the devices
    for name in iv_names:
      dev, child, new_drbd = iv_names[name]
      cfg.SetDiskID(dev, instance.primary_node)
      # index 5 of call_blockdev_find's result is the degraded flag
      is_degr = rpc.call_blockdev_find(instance.primary_node, dev)[5]
      if is_degr:
        raise errors.OpExecError("MD device %s is degraded!" % name)
      cfg.SetDiskID(new_drbd, instance.primary_node)
      is_degr = rpc.call_blockdev_find(instance.primary_node, new_drbd)[5]
      if is_degr:
        raise errors.OpExecError("New drbd device %s is degraded!" % name)

    # all new components are in sync: detach and remove the old ones
    for name in iv_names:
      dev, child, new_drbd = iv_names[name]
      logger.Info("remove mirror %s component" % name)
      cfg.SetDiskID(dev, instance.primary_node)
      if not rpc.call_blockdev_removechildren(instance.primary_node,
                                              dev, [child]):
        logger.Error("Can't remove child from mirror, aborting"
                     " *this device cleanup*.\nYou need to cleanup manually!!")
        continue

      for node in child.logical_id[:2]:
        logger.Info("remove child device on %s" % node)
        cfg.SetDiskID(child, node)
        if not rpc.call_blockdev_remove(node, child):
          logger.Error("Warning: failed to remove device from node %s,"
                       " continuing operation." % node)

      dev.children.remove(child)

      cfg.AddInstance(instance)
  def _ExecD8DiskOnly(self, feedback_fn):
    """Replace a disk on the primary or secondary for dbrd8.

    The algorithm for replace is quite complicated:
      - for each disk to be replaced:
        - create new LVs on the target node with unique names
        - detach old LVs from the drbd device
        - rename old LVs to name_replaced.<time_t>
        - rename new LVs to old LVs
        - attach the new LVs (with the old names now) to the drbd device
      - wait for sync across all devices
      - for each modified disk:
        - remove old LVs (which have the name name_replaces.<time_t>)

    Failures are not very well handled.

    """
    steps_total = 6
    warning, info = (self.proc.LogWarning, self.proc.LogInfo)
    instance = self.instance
    iv_names = {}
    vgname = self.cfg.GetVGName()
    # start of work
    cfg = self.cfg
    # tgt_node/oth_node were decided by CheckPrereq based on op.mode
    tgt_node = self.tgt_node
    oth_node = self.oth_node

    # Step: check device activation
    self.proc.LogStep(1, steps_total, "check device existence")
    info("checking volume groups")
    my_vg = cfg.GetVGName()
    results = rpc.call_vg_list([oth_node, tgt_node])
    if not results:
      raise errors.OpExecError("Can't list volume groups on the nodes")
    for node in oth_node, tgt_node:
      res = results.get(node, False)
      if not res or my_vg not in res:
        raise errors.OpExecError("Volume group '%s' not found on %s" %
                                 (my_vg, node))
    # every disk selected for replacement must be visible on both nodes
    for dev in instance.disks:
      if not dev.iv_name in self.op.disks:
        continue
      for node in tgt_node, oth_node:
        info("checking %s on %s" % (dev.iv_name, node))
        cfg.SetDiskID(dev, node)
        if not rpc.call_blockdev_find(node, dev):
          raise errors.OpExecError("Can't find device %s on node %s" %
                                   (dev.iv_name, node))

    # Step: check other node consistency
    self.proc.LogStep(2, steps_total, "check peer consistency")
    for dev in instance.disks:
      if not dev.iv_name in self.op.disks:
        continue
      info("checking %s consistency on %s" % (dev.iv_name, oth_node))
      if not _CheckDiskConsistency(self.cfg, dev, oth_node,
                                   oth_node==instance.primary_node):
        raise errors.OpExecError("Peer node (%s) has degraded storage, unsafe"
                                 " to replace disks on this node (%s)" %
                                 (oth_node, tgt_node))

    # Step: create new storage
    self.proc.LogStep(3, steps_total, "allocate new storage")
    for dev in instance.disks:
      if not dev.iv_name in self.op.disks:
        continue
      size = dev.size
      cfg.SetDiskID(dev, tgt_node)
      lv_names = [".%s_%s" % (dev.iv_name, suf) for suf in ["data", "meta"]]
      names = _GenerateUniqueNames(cfg, lv_names)
      lv_data = objects.Disk(dev_type=constants.LD_LV, size=size,
                             logical_id=(vgname, names[0]))
      # NOTE: DRBD meta LV size is hard-coded to 128 (MiB, presumably)
      lv_meta = objects.Disk(dev_type=constants.LD_LV, size=128,
                             logical_id=(vgname, names[1]))
      new_lvs = [lv_data, lv_meta]
      old_lvs = dev.children
      iv_names[dev.iv_name] = (dev, old_lvs, new_lvs)
      info("creating new local storage on %s for %s" %
           (tgt_node, dev.iv_name))
      # since we *always* want to create this LV, we use the
      # _Create...OnPrimary (which forces the creation), even if we
      # are talking about the secondary node
      for new_lv in new_lvs:
        if not _CreateBlockDevOnPrimary(cfg, tgt_node, instance, new_lv,
                                        _GetInstanceInfoText(instance)):
          raise errors.OpExecError("Failed to create new LV named '%s' on"
                                   " node '%s'" %
                                   (new_lv.logical_id[1], tgt_node))

    # Step: for each lv, detach+rename*2+attach
    self.proc.LogStep(4, steps_total, "change drbd configuration")
    for dev, old_lvs, new_lvs in iv_names.itervalues():
      info("detaching %s drbd from local storage" % dev.iv_name)
      if not rpc.call_blockdev_removechildren(tgt_node, dev, old_lvs):
        raise errors.OpExecError("Can't detach drbd from local storage on node"
                                 " %s for device %s" % (tgt_node, dev.iv_name))
      #dev.children = []
      #cfg.Update(instance)

      # ok, we created the new LVs, so now we know we have the needed
      # storage; as such, we proceed on the target node to rename
      # old_lv to _old, and new_lv to old_lv; note that we rename LVs
      # using the assumption that logical_id == physical_id (which in
      # turn is the unique_id on that node)

      # FIXME(iustin): use a better name for the replaced LVs
      temp_suffix = int(time.time())
      # ren_fn maps a disk to its (vg, lv_replaced-<suffix>) rename target
      ren_fn = lambda d, suff: (d.physical_id[0],
                                d.physical_id[1] + "_replaced-%s" % suff)
      # build the rename list based on what LVs exist on the node
      rlist = []
      for to_ren in old_lvs:
        find_res = rpc.call_blockdev_find(tgt_node, to_ren)
        if find_res is not None: # device exists
          rlist.append((to_ren, ren_fn(to_ren, temp_suffix)))

      info("renaming the old LVs on the target node")
      if not rpc.call_blockdev_rename(tgt_node, rlist):
        raise errors.OpExecError("Can't rename old LVs on node %s" % tgt_node)
      # now we rename the new LVs to the old LVs
      info("renaming the new LVs on the target node")
      rlist = [(new, old.physical_id) for old, new in zip(old_lvs, new_lvs)]
      if not rpc.call_blockdev_rename(tgt_node, rlist):
        raise errors.OpExecError("Can't rename new LVs on node %s" % tgt_node)

      # update the config objects to reflect the renames on the node
      for old, new in zip(old_lvs, new_lvs):
        new.logical_id = old.logical_id
        cfg.SetDiskID(new, tgt_node)

      for disk in old_lvs:
        disk.logical_id = ren_fn(disk, temp_suffix)
        cfg.SetDiskID(disk, tgt_node)

      # now that the new lvs have the old name, we can add them to the device
      info("adding new mirror component on %s" % tgt_node)
      if not rpc.call_blockdev_addchildren(tgt_node, dev, new_lvs):
        for new_lv in new_lvs:
          if not rpc.call_blockdev_remove(tgt_node, new_lv):
            # NOTE(review): the "%s" here has no matching argument for the
            # device name — looks like a missing new_lv arg; confirm against
            # self.proc.LogWarning's signature before relying on this message
            warning("Can't rollback device %s", hint="manually cleanup unused"
                    " logical volumes")
        raise errors.OpExecError("Can't add local storage to drbd")

      dev.children = new_lvs
      cfg.Update(instance)

    # Step: wait for sync

    # this can fail as the old devices are degraded and _WaitForSync
    # does a combined result over all disks, so we don't check its
    # return value
    self.proc.LogStep(5, steps_total, "sync devices")
    _WaitForSync(cfg, instance, self.proc, unlock=True)

    # so check manually all the devices
    for name, (dev, old_lvs, new_lvs) in iv_names.iteritems():
      cfg.SetDiskID(dev, instance.primary_node)
      # index 5 of call_blockdev_find's result is the degraded flag
      is_degr = rpc.call_blockdev_find(instance.primary_node, dev)[5]
      if is_degr:
        raise errors.OpExecError("DRBD device %s is degraded!" % name)

    # Step: remove old storage
    self.proc.LogStep(6, steps_total, "removing old storage")
    for name, (dev, old_lvs, new_lvs) in iv_names.iteritems():
      info("remove logical volumes for %s" % name)
      for lv in old_lvs:
        cfg.SetDiskID(lv, tgt_node)
        # removal failures are only warned about; the replacement itself
        # has already succeeded at this point
        if not rpc.call_blockdev_remove(tgt_node, lv):
          warning("Can't remove old LV", hint="manually remove unused LVs")
          continue
  def _ExecD8Secondary(self, feedback_fn):
    """Replace the secondary node for drbd8.

    The algorithm for replace is quite complicated:
      - for all disks of the instance:
        - create new LVs on the new node with same names
        - shutdown the drbd device on the old secondary
        - disconnect the drbd network on the primary
        - create the drbd device on the new secondary
        - network attach the drbd on the primary, using an artifice:
          the drbd code for Attach() will connect to the network if it
          finds a device which is connected to the good local disks but
          not network enabled
      - wait for sync across all devices
      - remove all disks from the old secondary

    Failures are not very well handled.

    """
    steps_total = 6
    warning, info = (self.proc.LogWarning, self.proc.LogInfo)
    instance = self.instance
    iv_names = {}
    # start of work
    cfg = self.cfg
    old_node = self.tgt_node
    new_node = self.new_node
    pri_node = instance.primary_node

    # Step: check device activation
    self.proc.LogStep(1, steps_total, "check device existence")
    info("checking volume groups")
    my_vg = cfg.GetVGName()
    results = rpc.call_vg_list([pri_node, new_node])
    if not results:
      raise errors.OpExecError("Can't list volume groups on the nodes")
    for node in pri_node, new_node:
      res = results.get(node, False)
      if not res or my_vg not in res:
        raise errors.OpExecError("Volume group '%s' not found on %s" %
                                 (my_vg, node))
    for dev in instance.disks:
      if not dev.iv_name in self.op.disks:
        continue
      info("checking %s on %s" % (dev.iv_name, pri_node))
      cfg.SetDiskID(dev, pri_node)
      if not rpc.call_blockdev_find(pri_node, dev):
        raise errors.OpExecError("Can't find device %s on node %s" %
                                 (dev.iv_name, pri_node))

    # Step: check other node consistency
    self.proc.LogStep(2, steps_total, "check peer consistency")
    for dev in instance.disks:
      if not dev.iv_name in self.op.disks:
        continue
      info("checking %s consistency on %s" % (dev.iv_name, pri_node))
      # ldisk=True means we check the local disk status, not the
      # network-wide one; the primary must be clean on its own before
      # we drop the old secondary
      if not _CheckDiskConsistency(self.cfg, dev, pri_node, True, ldisk=True):
        raise errors.OpExecError("Primary node (%s) has degraded storage,"
                                 " unsafe to replace the secondary" %
                                 pri_node)

    # Step: create new storage
    self.proc.LogStep(3, steps_total, "allocate new storage")
    for dev in instance.disks:
      info("adding new local storage on %s for %s" % (new_node, dev.iv_name))
      # since we *always* want to create this LV, we use the
      # _Create...OnPrimary (which forces the creation), even if we
      # are talking about the secondary node
      for new_lv in dev.children:
        if not _CreateBlockDevOnPrimary(cfg, new_node, instance, new_lv,
                                        _GetInstanceInfoText(instance)):
          raise errors.OpExecError("Failed to create new LV named '%s' on"
                                   " node '%s'" %
                                   (new_lv.logical_id[1], new_node))

      # remember the old LVs so step 6 can delete them from old_node
      iv_names[dev.iv_name] = (dev, dev.children)

    self.proc.LogStep(4, steps_total, "changing drbd configuration")
    for dev in instance.disks:
      info("activating a new drbd on %s for %s" % (new_node, dev.iv_name))
      # create new devices on new_node
      new_drbd = objects.Disk(dev_type=constants.LD_DRBD8,
                              logical_id=(pri_node, new_node,
                                          dev.logical_id[2]),
                              children=dev.children)
      if not _CreateBlockDevOnSecondary(cfg, new_node, instance,
                                        new_drbd, False,
                                        _GetInstanceInfoText(instance)):
        raise errors.OpExecError("Failed to create new DRBD on"
                                 " node '%s'" % new_node)

    for dev in instance.disks:
      # we have new devices, shutdown the drbd on the old secondary
      info("shutting down drbd for %s on old node" % dev.iv_name)
      cfg.SetDiskID(dev, old_node)
      if not rpc.call_blockdev_shutdown(old_node, dev):
        warning("Failed to shutdown drbd for %s on old node" % dev.iv_name,
                hint="Please cleanup this device manually as soon as possible")

    info("detaching primary drbds from the network (=> standalone)")
    done = 0
    for dev in instance.disks:
      cfg.SetDiskID(dev, pri_node)
      # set the physical (unique in bdev terms) id to None, meaning
      # detach from network
      dev.physical_id = (None,) * len(dev.physical_id)
      # and 'find' the device, which will 'fix' it to match the
      # standalone state
      if rpc.call_blockdev_find(pri_node, dev):
        done += 1
      else:
        warning("Failed to detach drbd %s from network, unusual case" %
                dev.iv_name)

    if not done:
      # no detaches succeeded (very unlikely)
      raise errors.OpExecError("Can't detach at least one DRBD from old node")

    # if we managed to detach at least one, we update all the disks of
    # the instance to point to the new secondary
    info("updating instance configuration")
    for dev in instance.disks:
      dev.logical_id = (pri_node, new_node) + dev.logical_id[2:]
      cfg.SetDiskID(dev, pri_node)
    cfg.Update(instance)

    # and now perform the drbd attach
    info("attaching primary drbds to new secondary (standalone => connected)")
    for dev in instance.disks:
      info("attaching primary drbd for %s to new secondary node" % dev.iv_name)
      # since the attach is smart, it's enough to 'find' the device,
      # it will automatically activate the network, if the physical_id
      # is correct
      cfg.SetDiskID(dev, pri_node)
      if not rpc.call_blockdev_find(pri_node, dev):
        warning("can't attach drbd %s to new secondary!" % dev.iv_name,
                hint="please do a gnt-instance info to see the"
                " status of disks")

    # this can fail as the old devices are degraded and _WaitForSync
    # does a combined result over all disks, so we don't check its
    # return value
    self.proc.LogStep(5, steps_total, "sync devices")
    _WaitForSync(cfg, instance, self.proc, unlock=True)

    # so check manually all the devices
    for name, (dev, old_lvs) in iv_names.iteritems():
      cfg.SetDiskID(dev, pri_node)
      is_degr = rpc.call_blockdev_find(pri_node, dev)[5]
      if is_degr:
        raise errors.OpExecError("DRBD device %s is degraded!" % name)

    self.proc.LogStep(6, steps_total, "removing old storage")
    for name, (dev, old_lvs) in iv_names.iteritems():
      info("remove logical volumes for %s" % name)
      for lv in old_lvs:
        cfg.SetDiskID(lv, old_node)
        # removal failures are non-fatal: the replace already succeeded,
        # the admin just has stale LVs to clean up
        if not rpc.call_blockdev_remove(old_node, lv):
          warning("Can't remove LV on old secondary",
                  hint="Cleanup stale volumes by hand")
3925 a9e0c397 Iustin Pop
3926 a9e0c397 Iustin Pop
  def Exec(self, feedback_fn):
    """Execute disk replacement.

    This dispatches the disk replacement to the appropriate handler.

    """
    template = self.instance.disk_template
    if template == constants.DT_REMOTE_RAID1:
      handler = self._ExecRR1
    elif template == constants.DT_DRBD8:
      # drbd8 has two modes: a given remote_node means "switch to a new
      # secondary", otherwise only the local disks are replaced
      if self.op.remote_node is not None:
        handler = self._ExecD8Secondary
      else:
        handler = self._ExecD8DiskOnly
    else:
      raise errors.ProgrammerError("Unhandled disk replacement case")
    return handler(feedback_fn)
3943 a9e0c397 Iustin Pop
3944 a8083063 Iustin Pop
3945 a8083063 Iustin Pop
class LUQueryInstanceData(NoHooksLU):
  """Query runtime instance data.

  """
  _OP_REQP = ["instances"]

  def CheckPrereq(self):
    """Check prerequisites.

    This only checks the optional instance list against the existing names.

    """
    if not isinstance(self.op.instances, list):
      raise errors.OpPrereqError("Invalid argument type 'instances'")
    if not self.op.instances:
      # an empty list means "all instances in the cluster"
      self.wanted_instances = [self.cfg.GetInstanceInfo(iname) for iname
                               in self.cfg.GetInstanceList()]
      return
    wanted = []
    for iname in self.op.instances:
      inst = self.cfg.GetInstanceInfo(self.cfg.ExpandInstanceName(iname))
      if inst is None:
        raise errors.OpPrereqError("No such instance name '%s'" % iname)
      wanted.append(inst)
    self.wanted_instances = wanted
    return

  def _ComputeDiskStatus(self, instance, snode, dev):
    """Compute block device status.

    Recursively builds a dict describing the device and its children,
    including the result of blockdev_find on the primary and (if any)
    the secondary node.

    """
    pnode = instance.primary_node
    self.cfg.SetDiskID(dev, pnode)
    dev_pstatus = rpc.call_blockdev_find(pnode, dev)
    if dev.dev_type in constants.LDS_DRBD:
      # we change the snode then (otherwise we use the one passed in)
      if dev.logical_id[0] == pnode:
        snode = dev.logical_id[1]
      else:
        snode = dev.logical_id[0]

    dev_sstatus = None
    if snode:
      self.cfg.SetDiskID(dev, snode)
      dev_sstatus = rpc.call_blockdev_find(snode, dev)

    dev_children = []
    if dev.children:
      dev_children = [self._ComputeDiskStatus(instance, snode, child)
                      for child in dev.children]

    return {
      "iv_name": dev.iv_name,
      "dev_type": dev.dev_type,
      "logical_id": dev.logical_id,
      "physical_id": dev.physical_id,
      "pstatus": dev_pstatus,
      "sstatus": dev_sstatus,
      "children": dev_children,
      }

  def Exec(self, feedback_fn):
    """Gather and return data"""
    result = {}
    for instance in self.wanted_instances:
      remote_info = rpc.call_instance_info(instance.primary_node,
                                           instance.name)
      # the hypervisor reporting a state means the instance is running
      remote_state = "down"
      if remote_info and "state" in remote_info:
        remote_state = "up"
      config_state = "up"
      if instance.status == "down":
        config_state = "down"

      disks = [self._ComputeDiskStatus(instance, None, device)
               for device in instance.disks]

      result[instance.name] = {
        "name": instance.name,
        "config_state": config_state,
        "run_state": remote_state,
        "pnode": instance.primary_node,
        "snodes": instance.secondary_nodes,
        "os": instance.os,
        "memory": instance.memory,
        "nics": [(nic.mac, nic.ip, nic.bridge) for nic in instance.nics],
        "disks": disks,
        "network_port": instance.network_port,
        "vcpus": instance.vcpus,
        "kernel_path": instance.kernel_path,
        "initrd_path": instance.initrd_path,
        "hvm_boot_order": instance.hvm_boot_order,
        }

    return result
4048 a8083063 Iustin Pop
4049 a8083063 Iustin Pop
4050 a8083063 Iustin Pop
class LUSetInstanceParms(LogicalUnit):
4051 a8083063 Iustin Pop
  """Modifies an instances's parameters.
4052 a8083063 Iustin Pop

4053 a8083063 Iustin Pop
  """
4054 a8083063 Iustin Pop
  HPATH = "instance-modify"
4055 a8083063 Iustin Pop
  HTYPE = constants.HTYPE_INSTANCE
4056 a8083063 Iustin Pop
  _OP_REQP = ["instance_name"]
4057 a8083063 Iustin Pop
4058 a8083063 Iustin Pop
  def BuildHooksEnv(self):
4059 a8083063 Iustin Pop
    """Build hooks env.
4060 a8083063 Iustin Pop

4061 a8083063 Iustin Pop
    This runs on the master, primary and secondaries.
4062 a8083063 Iustin Pop

4063 a8083063 Iustin Pop
    """
4064 396e1b78 Michael Hanselmann
    args = dict()
4065 a8083063 Iustin Pop
    if self.mem:
4066 396e1b78 Michael Hanselmann
      args['memory'] = self.mem
4067 a8083063 Iustin Pop
    if self.vcpus:
4068 396e1b78 Michael Hanselmann
      args['vcpus'] = self.vcpus
4069 ef756965 Iustin Pop
    if self.do_ip or self.do_bridge or self.mac:
4070 396e1b78 Michael Hanselmann
      if self.do_ip:
4071 396e1b78 Michael Hanselmann
        ip = self.ip
4072 396e1b78 Michael Hanselmann
      else:
4073 396e1b78 Michael Hanselmann
        ip = self.instance.nics[0].ip
4074 396e1b78 Michael Hanselmann
      if self.bridge:
4075 396e1b78 Michael Hanselmann
        bridge = self.bridge
4076 396e1b78 Michael Hanselmann
      else:
4077 396e1b78 Michael Hanselmann
        bridge = self.instance.nics[0].bridge
4078 ef756965 Iustin Pop
      if self.mac:
4079 ef756965 Iustin Pop
        mac = self.mac
4080 ef756965 Iustin Pop
      else:
4081 ef756965 Iustin Pop
        mac = self.instance.nics[0].mac
4082 ef756965 Iustin Pop
      args['nics'] = [(ip, bridge, mac)]
4083 396e1b78 Michael Hanselmann
    env = _BuildInstanceHookEnvByObject(self.instance, override=args)
4084 880478f8 Iustin Pop
    nl = [self.sstore.GetMasterNode(),
4085 a8083063 Iustin Pop
          self.instance.primary_node] + list(self.instance.secondary_nodes)
4086 a8083063 Iustin Pop
    return env, nl, nl
4087 a8083063 Iustin Pop
4088 a8083063 Iustin Pop
  def CheckPrereq(self):
4089 a8083063 Iustin Pop
    """Check prerequisites.
4090 a8083063 Iustin Pop

4091 a8083063 Iustin Pop
    This only checks the instance list against the existing names.
4092 a8083063 Iustin Pop

4093 a8083063 Iustin Pop
    """
4094 a8083063 Iustin Pop
    self.mem = getattr(self.op, "mem", None)
4095 a8083063 Iustin Pop
    self.vcpus = getattr(self.op, "vcpus", None)
4096 a8083063 Iustin Pop
    self.ip = getattr(self.op, "ip", None)
4097 1862d460 Alexander Schreiber
    self.mac = getattr(self.op, "mac", None)
4098 a8083063 Iustin Pop
    self.bridge = getattr(self.op, "bridge", None)
4099 973d7867 Iustin Pop
    self.kernel_path = getattr(self.op, "kernel_path", None)
4100 973d7867 Iustin Pop
    self.initrd_path = getattr(self.op, "initrd_path", None)
4101 25c5878d Alexander Schreiber
    self.hvm_boot_order = getattr(self.op, "hvm_boot_order", None)
4102 973d7867 Iustin Pop
    all_parms = [self.mem, self.vcpus, self.ip, self.bridge, self.mac,
4103 25c5878d Alexander Schreiber
                 self.kernel_path, self.initrd_path, self.hvm_boot_order]
4104 973d7867 Iustin Pop
    if all_parms.count(None) == len(all_parms):
4105 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("No changes submitted")
4106 a8083063 Iustin Pop
    if self.mem is not None:
4107 a8083063 Iustin Pop
      try:
4108 a8083063 Iustin Pop
        self.mem = int(self.mem)
4109 a8083063 Iustin Pop
      except ValueError, err:
4110 3ecf6786 Iustin Pop
        raise errors.OpPrereqError("Invalid memory size: %s" % str(err))
4111 a8083063 Iustin Pop
    if self.vcpus is not None:
4112 a8083063 Iustin Pop
      try:
4113 a8083063 Iustin Pop
        self.vcpus = int(self.vcpus)
4114 a8083063 Iustin Pop
      except ValueError, err:
4115 3ecf6786 Iustin Pop
        raise errors.OpPrereqError("Invalid vcpus number: %s" % str(err))
4116 a8083063 Iustin Pop
    if self.ip is not None:
4117 a8083063 Iustin Pop
      self.do_ip = True
4118 a8083063 Iustin Pop
      if self.ip.lower() == "none":
4119 a8083063 Iustin Pop
        self.ip = None
4120 a8083063 Iustin Pop
      else:
4121 a8083063 Iustin Pop
        if not utils.IsValidIP(self.ip):
4122 3ecf6786 Iustin Pop
          raise errors.OpPrereqError("Invalid IP address '%s'." % self.ip)
4123 a8083063 Iustin Pop
    else:
4124 a8083063 Iustin Pop
      self.do_ip = False
4125 ecb215b5 Michael Hanselmann
    self.do_bridge = (self.bridge is not None)
4126 1862d460 Alexander Schreiber
    if self.mac is not None:
4127 1862d460 Alexander Schreiber
      if self.cfg.IsMacInUse(self.mac):
4128 1862d460 Alexander Schreiber
        raise errors.OpPrereqError('MAC address %s already in use in cluster' %
4129 1862d460 Alexander Schreiber
                                   self.mac)
4130 1862d460 Alexander Schreiber
      if not utils.IsValidMac(self.mac):
4131 1862d460 Alexander Schreiber
        raise errors.OpPrereqError('Invalid MAC address %s' % self.mac)
4132 a8083063 Iustin Pop
4133 973d7867 Iustin Pop
    if self.kernel_path is not None:
4134 973d7867 Iustin Pop
      self.do_kernel_path = True
4135 973d7867 Iustin Pop
      if self.kernel_path == constants.VALUE_NONE:
4136 973d7867 Iustin Pop
        raise errors.OpPrereqError("Can't set instance to no kernel")
4137 973d7867 Iustin Pop
4138 973d7867 Iustin Pop
      if self.kernel_path != constants.VALUE_DEFAULT:
4139 973d7867 Iustin Pop
        if not os.path.isabs(self.kernel_path):
4140 ba4b62cf Iustin Pop
          raise errors.OpPrereqError("The kernel path must be an absolute"
4141 973d7867 Iustin Pop
                                    " filename")
4142 8cafeb26 Iustin Pop
    else:
4143 8cafeb26 Iustin Pop
      self.do_kernel_path = False
4144 973d7867 Iustin Pop
4145 973d7867 Iustin Pop
    if self.initrd_path is not None:
4146 973d7867 Iustin Pop
      self.do_initrd_path = True
4147 973d7867 Iustin Pop
      if self.initrd_path not in (constants.VALUE_NONE,
4148 973d7867 Iustin Pop
                                  constants.VALUE_DEFAULT):
4149 2bc22872 Iustin Pop
        if not os.path.isabs(self.initrd_path):
4150 ba4b62cf Iustin Pop
          raise errors.OpPrereqError("The initrd path must be an absolute"
4151 973d7867 Iustin Pop
                                    " filename")
4152 8cafeb26 Iustin Pop
    else:
4153 8cafeb26 Iustin Pop
      self.do_initrd_path = False
4154 973d7867 Iustin Pop
4155 25c5878d Alexander Schreiber
    # boot order verification
4156 25c5878d Alexander Schreiber
    if self.hvm_boot_order is not None:
4157 25c5878d Alexander Schreiber
      if self.hvm_boot_order != constants.VALUE_DEFAULT:
4158 25c5878d Alexander Schreiber
        if len(self.hvm_boot_order.strip("acdn")) != 0:
4159 25c5878d Alexander Schreiber
          raise errors.OpPrereqError("invalid boot order specified,"
4160 25c5878d Alexander Schreiber
                                     " must be one or more of [acdn]"
4161 25c5878d Alexander Schreiber
                                     " or 'default'")
4162 25c5878d Alexander Schreiber
4163 a8083063 Iustin Pop
    instance = self.cfg.GetInstanceInfo(
4164 a8083063 Iustin Pop
      self.cfg.ExpandInstanceName(self.op.instance_name))
4165 a8083063 Iustin Pop
    if instance is None:
4166 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("No such instance name '%s'" %
4167 3ecf6786 Iustin Pop
                                 self.op.instance_name)
4168 a8083063 Iustin Pop
    self.op.instance_name = instance.name
4169 a8083063 Iustin Pop
    self.instance = instance
4170 a8083063 Iustin Pop
    return
4171 a8083063 Iustin Pop
4172 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
4173 a8083063 Iustin Pop
    """Modifies an instance.
4174 a8083063 Iustin Pop

4175 a8083063 Iustin Pop
    All parameters take effect only at the next restart of the instance.
4176 a8083063 Iustin Pop
    """
4177 a8083063 Iustin Pop
    result = []
4178 a8083063 Iustin Pop
    instance = self.instance
4179 a8083063 Iustin Pop
    if self.mem:
4180 a8083063 Iustin Pop
      instance.memory = self.mem
4181 a8083063 Iustin Pop
      result.append(("mem", self.mem))
4182 a8083063 Iustin Pop
    if self.vcpus:
4183 a8083063 Iustin Pop
      instance.vcpus = self.vcpus
4184 a8083063 Iustin Pop
      result.append(("vcpus",  self.vcpus))
4185 a8083063 Iustin Pop
    if self.do_ip:
4186 a8083063 Iustin Pop
      instance.nics[0].ip = self.ip
4187 a8083063 Iustin Pop
      result.append(("ip", self.ip))
4188 a8083063 Iustin Pop
    if self.bridge:
4189 a8083063 Iustin Pop
      instance.nics[0].bridge = self.bridge
4190 a8083063 Iustin Pop
      result.append(("bridge", self.bridge))
4191 1862d460 Alexander Schreiber
    if self.mac:
4192 1862d460 Alexander Schreiber
      instance.nics[0].mac = self.mac
4193 1862d460 Alexander Schreiber
      result.append(("mac", self.mac))
4194 973d7867 Iustin Pop
    if self.do_kernel_path:
4195 973d7867 Iustin Pop
      instance.kernel_path = self.kernel_path
4196 973d7867 Iustin Pop
      result.append(("kernel_path", self.kernel_path))
4197 973d7867 Iustin Pop
    if self.do_initrd_path:
4198 973d7867 Iustin Pop
      instance.initrd_path = self.initrd_path
4199 973d7867 Iustin Pop
      result.append(("initrd_path", self.initrd_path))
4200 25c5878d Alexander Schreiber
    if self.hvm_boot_order:
4201 25c5878d Alexander Schreiber
      if self.hvm_boot_order == constants.VALUE_DEFAULT:
4202 25c5878d Alexander Schreiber
        instance.hvm_boot_order = None
4203 25c5878d Alexander Schreiber
      else:
4204 25c5878d Alexander Schreiber
        instance.hvm_boot_order = self.hvm_boot_order
4205 25c5878d Alexander Schreiber
      result.append(("hvm_boot_order", self.hvm_boot_order))
4206 a8083063 Iustin Pop
4207 a8083063 Iustin Pop
    self.cfg.AddInstance(instance)
4208 a8083063 Iustin Pop
4209 a8083063 Iustin Pop
    return result
4210 a8083063 Iustin Pop
4211 a8083063 Iustin Pop
4212 a8083063 Iustin Pop
class LUQueryExports(NoHooksLU):
  """Query the exports list

  """
  _OP_REQP = []

  def CheckPrereq(self):
    """Check that the nodelist contains only existing nodes.

    """
    # a missing or empty 'nodes' attribute means "query all nodes"
    node_names = getattr(self.op, "nodes", None)
    self.nodes = _GetWantedNodes(self, node_names)

  def Exec(self, feedback_fn):
    """Compute the list of all the exported system images.

    Returns:
      a dictionary with the structure node->(export-list)
      where export-list is a list of the instances exported on
      that node.

    """
    return rpc.call_export_list(self.nodes)
4234 a8083063 Iustin Pop
4235 a8083063 Iustin Pop
4236 a8083063 Iustin Pop
class LUExportInstance(LogicalUnit):
  """Export an instance to an image in the cluster.

  """
  HPATH = "instance-export"
  HTYPE = constants.HTYPE_INSTANCE
  _OP_REQP = ["instance_name", "target_node", "shutdown"]

  def BuildHooksEnv(self):
    """Build hooks env.

    This will run on the master, primary node and target node.

    """
    env = {
      "EXPORT_NODE": self.op.target_node,
      "EXPORT_DO_SHUTDOWN": self.op.shutdown,
      }
    env.update(_BuildInstanceHookEnvByObject(self.instance))
    nl = [self.sstore.GetMasterNode(), self.instance.primary_node,
          self.op.target_node]
    return env, nl, nl

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance name is a valid one.

    Resolves both the instance and the destination node, storing them
    in self.instance and self.dst_node; raises OpPrereqError if either
    is unknown.

    """
    instance_name = self.cfg.ExpandInstanceName(self.op.instance_name)
    self.instance = self.cfg.GetInstanceInfo(instance_name)
    if self.instance is None:
      raise errors.OpPrereqError("Instance '%s' not found" %
                                 self.op.instance_name)

    # node verification
    dst_node_short = self.cfg.ExpandNodeName(self.op.target_node)
    self.dst_node = self.cfg.GetNodeInfo(dst_node_short)

    if self.dst_node is None:
      raise errors.OpPrereqError("Destination node '%s' is unknown." %
                                 self.op.target_node)
    # normalize the target node to its full name
    self.op.target_node = self.dst_node.name

  def Exec(self, feedback_fn):
    """Export an instance to an image in the cluster.

    Optionally shuts the instance down for a consistent snapshot,
    snapshots its disks, exports the snapshots to the target node,
    finalizes the export and removes any older exports of the same
    instance from other nodes.  Most per-step failures are only
    logged, not fatal.

    """
    instance = self.instance
    dst_node = self.dst_node
    src_node = instance.primary_node
    # shutdown the instance, unless requested not to do so
    if self.op.shutdown:
      op = opcodes.OpShutdownInstance(instance_name=instance.name)
      self.proc.ChainOpCode(op)

    vgname = self.cfg.GetVGName()

    snap_disks = []

    # the try/finally guarantees the instance is restarted even if
    # snapshotting fails half-way
    try:
      for disk in instance.disks:
        # NOTE: only the first disk ("sda") is snapshotted/exported
        if disk.iv_name == "sda":
          # new_dev_name will be a snapshot of an lvm leaf of the one we passed
          new_dev_name = rpc.call_blockdev_snapshot(src_node, disk)

          if not new_dev_name:
            logger.Error("could not snapshot block device %s on node %s" %
                         (disk.logical_id[1], src_node))
          else:
            # wrap the snapshot LV in a Disk object so it can be passed
            # to the export/remove RPCs below
            new_dev = objects.Disk(dev_type=constants.LD_LV, size=disk.size,
                                      logical_id=(vgname, new_dev_name),
                                      physical_id=(vgname, new_dev_name),
                                      iv_name=disk.iv_name)
            snap_disks.append(new_dev)

    finally:
      # restart the instance only if we were the ones who stopped it
      if self.op.shutdown:
        op = opcodes.OpStartupInstance(instance_name=instance.name,
                                       force=False)
        self.proc.ChainOpCode(op)

    # TODO: check for size

    for dev in snap_disks:
      if not rpc.call_snapshot_export(src_node, dev, dst_node.name,
                                           instance):
        logger.Error("could not export block device %s from node"
                     " %s to node %s" %
                     (dev.logical_id[1], src_node, dst_node.name))
      # the snapshot served its purpose; drop it regardless of whether
      # the export succeeded
      if not rpc.call_blockdev_remove(src_node, dev):
        logger.Error("could not remove snapshot block device %s from"
                     " node %s" % (dev.logical_id[1], src_node))

    if not rpc.call_finalize_export(dst_node.name, instance, snap_disks):
      logger.Error("could not finalize export for instance %s on node %s" %
                   (instance.name, dst_node.name))

    nodelist = self.cfg.GetNodeList()
    nodelist.remove(dst_node.name)

    # on one-node clusters nodelist will be empty after the removal
    # if we proceed the backup would be removed because OpQueryExports
    # substitutes an empty list with the full cluster node list.
    if nodelist:
      op = opcodes.OpQueryExports(nodes=nodelist)
      exportlist = self.proc.ChainOpCode(op)
      for node in exportlist:
        if instance.name in exportlist[node]:
          if not rpc.call_export_remove(node, instance.name):
            logger.Error("could not remove older export for instance %s"
                         " on node %s" % (instance.name, node))
4348 5c947f38 Iustin Pop
4349 5c947f38 Iustin Pop
4350 5c947f38 Iustin Pop
class TagsLU(NoHooksLU):
  """Generic tags LU.

  This is an abstract class which is the parent of all the other tags LUs.

  """
  def CheckPrereq(self):
    """Check prerequisites.

    Resolves self.op.name (for node/instance tags) and stores the
    taggable object in self.target.

    """
    kind = self.op.kind
    if kind == constants.TAG_CLUSTER:
      # cluster tags need no name expansion
      self.target = self.cfg.GetClusterInfo()
      return
    if kind == constants.TAG_NODE:
      expanded = self.cfg.ExpandNodeName(self.op.name)
      if expanded is None:
        raise errors.OpPrereqError("Invalid node name (%s)" %
                                   (self.op.name,))
      self.op.name = expanded
      self.target = self.cfg.GetNodeInfo(expanded)
    elif kind == constants.TAG_INSTANCE:
      expanded = self.cfg.ExpandInstanceName(self.op.name)
      if expanded is None:
        raise errors.OpPrereqError("Invalid instance name (%s)" %
                                   (self.op.name,))
      self.op.name = expanded
      self.target = self.cfg.GetInstanceInfo(expanded)
    else:
      raise errors.OpPrereqError("Wrong tag type requested (%s)" %
                                 str(self.op.kind))
4379 5c947f38 Iustin Pop
4380 5c947f38 Iustin Pop
4381 5c947f38 Iustin Pop
class LUGetTags(TagsLU):
  """Returns the tags of a given object.

  """
  _OP_REQP = ["kind", "name"]

  def Exec(self, feedback_fn):
    """Returns the tag list.

    """
    # self.target was resolved by TagsLU.CheckPrereq
    tags = self.target.GetTags()
    return tags
4392 5c947f38 Iustin Pop
4393 5c947f38 Iustin Pop
4394 73415719 Iustin Pop
class LUSearchTags(NoHooksLU):
4395 73415719 Iustin Pop
  """Searches the tags for a given pattern.
4396 73415719 Iustin Pop

4397 73415719 Iustin Pop
  """
4398 73415719 Iustin Pop
  _OP_REQP = ["pattern"]
4399 73415719 Iustin Pop
4400 73415719 Iustin Pop
  def CheckPrereq(self):
4401 73415719 Iustin Pop
    """Check prerequisites.
4402 73415719 Iustin Pop

4403 73415719 Iustin Pop
    This checks the pattern passed for validity by compiling it.
4404 73415719 Iustin Pop

4405 73415719 Iustin Pop
    """
4406 73415719 Iustin Pop
    try:
4407 73415719 Iustin Pop
      self.re = re.compile(self.op.pattern)
4408 73415719 Iustin Pop
    except re.error, err:
4409 73415719 Iustin Pop
      raise errors.OpPrereqError("Invalid search pattern '%s': %s" %
4410 73415719 Iustin Pop
                                 (self.op.pattern, err))
4411 73415719 Iustin Pop
4412 73415719 Iustin Pop
  def Exec(self, feedback_fn):
4413 73415719 Iustin Pop
    """Returns the tag list.
4414 73415719 Iustin Pop

4415 73415719 Iustin Pop
    """
4416 73415719 Iustin Pop
    cfg = self.cfg
4417 73415719 Iustin Pop
    tgts = [("/cluster", cfg.GetClusterInfo())]
4418 73415719 Iustin Pop
    ilist = [cfg.GetInstanceInfo(name) for name in cfg.GetInstanceList()]
4419 73415719 Iustin Pop
    tgts.extend([("/instances/%s" % i.name, i) for i in ilist])
4420 73415719 Iustin Pop
    nlist = [cfg.GetNodeInfo(name) for name in cfg.GetNodeList()]
4421 73415719 Iustin Pop
    tgts.extend([("/nodes/%s" % n.name, n) for n in nlist])
4422 73415719 Iustin Pop
    results = []
4423 73415719 Iustin Pop
    for path, target in tgts:
4424 73415719 Iustin Pop
      for tag in target.GetTags():
4425 73415719 Iustin Pop
        if self.re.search(tag):
4426 73415719 Iustin Pop
          results.append((path, tag))
4427 73415719 Iustin Pop
    return results
4428 73415719 Iustin Pop
4429 73415719 Iustin Pop
4430 f27302fa Iustin Pop
class LUAddTags(TagsLU):
4431 5c947f38 Iustin Pop
  """Sets a tag on a given object.
4432 5c947f38 Iustin Pop

4433 5c947f38 Iustin Pop
  """
4434 f27302fa Iustin Pop
  _OP_REQP = ["kind", "name", "tags"]
4435 5c947f38 Iustin Pop
4436 5c947f38 Iustin Pop
  def CheckPrereq(self):
4437 5c947f38 Iustin Pop
    """Check prerequisites.
4438 5c947f38 Iustin Pop

4439 5c947f38 Iustin Pop
    This checks the type and length of the tag name and value.
4440 5c947f38 Iustin Pop

4441 5c947f38 Iustin Pop
    """
4442 5c947f38 Iustin Pop
    TagsLU.CheckPrereq(self)
4443 f27302fa Iustin Pop
    for tag in self.op.tags:
4444 f27302fa Iustin Pop
      objects.TaggableObject.ValidateTag(tag)
4445 5c947f38 Iustin Pop
4446 5c947f38 Iustin Pop
  def Exec(self, feedback_fn):
4447 5c947f38 Iustin Pop
    """Sets the tag.
4448 5c947f38 Iustin Pop

4449 5c947f38 Iustin Pop
    """
4450 5c947f38 Iustin Pop
    try:
4451 f27302fa Iustin Pop
      for tag in self.op.tags:
4452 f27302fa Iustin Pop
        self.target.AddTag(tag)
4453 5c947f38 Iustin Pop
    except errors.TagError, err:
4454 3ecf6786 Iustin Pop
      raise errors.OpExecError("Error while setting tag: %s" % str(err))
4455 5c947f38 Iustin Pop
    try:
4456 5c947f38 Iustin Pop
      self.cfg.Update(self.target)
4457 5c947f38 Iustin Pop
    except errors.ConfigurationError:
4458 3ecf6786 Iustin Pop
      raise errors.OpRetryError("There has been a modification to the"
4459 3ecf6786 Iustin Pop
                                " config file and the operation has been"
4460 3ecf6786 Iustin Pop
                                " aborted. Please retry.")
4461 5c947f38 Iustin Pop
4462 5c947f38 Iustin Pop
4463 f27302fa Iustin Pop
class LUDelTags(TagsLU):
  """Delete a list of tags from a given object.

  """
  _OP_REQP = ["kind", "name", "tags"]

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that we have the given tag.

    """
    # resolve self.target first
    TagsLU.CheckPrereq(self)
    for del_tag in self.op.tags:
      objects.TaggableObject.ValidateTag(del_tag)
    wanted = frozenset(self.op.tags)
    existing = self.target.GetTags()
    if not wanted <= existing:
      # report every requested tag that is not on the object
      missing = ["'%s'" % del_tag for del_tag in wanted - existing]
      missing.sort()
      raise errors.OpPrereqError("Tag(s) %s not found" %
                                 (",".join(missing)))

  def Exec(self, feedback_fn):
    """Remove the tag from the object.

    """
    for del_tag in self.op.tags:
      self.target.RemoveTag(del_tag)
    # persist the modified object; a concurrent config change means the
    # whole operation must be retried by the caller
    try:
      self.cfg.Update(self.target)
    except errors.ConfigurationError:
      raise errors.OpRetryError("There has been a modification to the"
                                " config file and the operation has been"
                                " aborted. Please retry.")
4499 06009e27 Iustin Pop
4500 06009e27 Iustin Pop
class LUTestDelay(NoHooksLU):
  """Sleep for a specified amount of time.

  This LU sleeps on the master and/or nodes for a specified amount of
  time.

  """
  _OP_REQP = ["duration", "on_master", "on_nodes"]

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that we have a good list of nodes and/or the duration
    is valid.

    """
    if self.op.on_nodes:
      # _GetWantedNodes also validates the given node names
      self.op.on_nodes = _GetWantedNodes(self, self.op.on_nodes)

  def Exec(self, feedback_fn):
    """Do the actual sleep.

    """
    if self.op.on_master and not utils.TestDelay(self.op.duration):
      raise errors.OpExecError("Error during master delay test")
    if self.op.on_nodes:
      results = rpc.call_test_delay(self.op.on_nodes, self.op.duration)
      if not results:
        raise errors.OpExecError("Complete failure from rpc call")
      # every contacted node must report success
      for node_name, node_ok in results.items():
        if not node_ok:
          raise errors.OpExecError("Failure during rpc call to node %s,"
                                   " result: %s" % (node_name, node_ok))